repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
quattor/aquilon | lib/aquilon/worker/commands/grant_root_access.py | Python | apache-2.0 | 2,384 | 0.000839 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agr | eed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq grant root access`."""
from aquilon.aqdb.model import Personality, User, | NetGroupWhiteList
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.change_management import ChangeManagement
class CommandGrantRootAccess(BrokerCommand):
requires_plenaries = True
required_parameters = ['personality', 'justification']
def _update_dbobj(self, obj, dbuser=None, dbnetgroup=None):
if dbuser and dbuser not in obj.root_users:
obj.root_users.append(dbuser)
return
if dbnetgroup and dbnetgroup not in obj.root_netgroups:
obj.root_netgroups.append(dbnetgroup)
def render(self, session, logger, plenaries, username, netgroup,
personality, archetype, justification, user, reason, **arguments):
dbobj = Personality.get_unique(session, name=personality,
archetype=archetype, compel=True)
for dbstage in dbobj.stages.values():
cm = ChangeManagement(session, user, justification, reason, logger, self.command, **arguments)
cm.consider(dbstage)
cm.validate()
if username:
dbuser = User.get_unique(session, name=username,
compel=True)
self._update_dbobj(dbobj, dbuser=dbuser)
elif netgroup:
dbng = NetGroupWhiteList.get_unique(session, name=netgroup,
compel=True)
self._update_dbobj(dbobj, dbnetgroup=dbng)
session.flush()
plenaries.add(dbobj.stages.values())
plenaries.write()
return
|
smartshark/vcsSHARK | pyvcsshark/utils.py | Python | apache-2.0 | 1,460 | 0.007534 | import os
import sys
def readable_dir(prospective_dir):
""" Function that checks if a path is a directory, if it exists and if it is accessible and only
returns true if all these three are the case
:param prospective_dir: path to the directory"""
if prospective_dir is not None:
if not os.path.isdir(prospective_dir):
raise Exception("readable_dir:{0} is not a valid path".format(prospective_dir))
if os.access(prospective_dir, os.R_OK):
return prospective_dir
else:
raise Exc | eption("readable_dir:{0} is not a readable dir".format(prospective_dir))
def find_plugins(plugin_dir):
"""Finds all python files in the spec | ified path and imports them. This is needed, if we want to
detect automatically, which datastore and parser we can apply
:param plugin_dir: path to the plugin directory"""
plugin_files = [x[:-3] for x in os.listdir(plugin_dir) if x.endswith(".py")]
sys.path.insert(0, plugin_dir)
for plugin in plugin_files:
__import__(plugin)
def get_immediate_subdirectories(a_dir):
""" Helper method, which gets the **immediate** subdirectories of a path. Is helpful, if one want to create a
parser, which looks if certain folders are there.
:param a_dir: directory from which **immediate** subdirectories should be listed """
return [name for name in os.listdir(a_dir) if os.path.isdir(os.path.join(a_dir, name))]
|
gratefulfrog/lib | python/web/pymolhttpd.py | Python | gpl-2.0 | 32,673 | 0.005632 | # Copyright (C) Schrodinger, LLC.
# All Rights Reserved
#
# For more information, see LICENSE in PyMOL's home directory.
#
# pymolhttpd.py
#
# web server interface for controlling PyMOL
# we make extensive use of Python's build-in in web infrastructure
import BaseHTTPServer, cgi, urlparse
import StringIO, socket
# we also rely upon Python's json infrastructure
try:
import simplejson as json
except:
import json
# standard Python dependencies
import types, os, sys, traceback, threading
# NOTE: Let's attempt to follow Python PEP 8 for coding style for this
# source code file. URL: http://www.python.org/de/peps/pep-0008
#
# * maximum target line length to be 79 characters.....................seventy9
# * methods and attribute names as lower_case_underscore
# * class names as UpperCaseCaps
# * private symbols start with a leading underscore
# * uniform indentation consisting of 4 spaces (no tabs!)
_json_mime_types = [ 'text/json', 'application/json' ]
class _PymolHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# for now, we're using a single-threaded server
# our actual HTTP server class is private for the time being
# if we need to, then we'll change this
def do_GET(self):
self.process_request()
def do_POST(self):
self.process_request()
def log_message(self, format, *args):
if self.server.pymol_logging:
BaseHTTPServer.BaseHTTPRequestHandler.log_message(self,format,
*args)
def process_request(self):
"""
parse any URL or FORM arguments and process the request
"""
# verify that the request is coming from this machine
try:
host, port = self.client_address
if (host[0:6] != '127.0.'):
self.send_error(403,
"Only localhost requests are allowed (not: %s)"
% host)
else:
self.session = self.server.pymol_session # local session
self.callback = None
self.parse_args()
self.process_urlpath()
except socket.error:
traceback.print_exc()
print "broken pipe"
pass
def parse_args(self):
"""
parses URL arguments into a urlpath (before the ?)
and a cgiFieldStorage object (args after the ?).
for example:
http://localhost:8080/apply/pymol.cmd.color?color=blue&selection=benz
would yield self.fs.getvalue("color") as "blue"
and self.fs.getvalue("selection") as "benz"
self.urlpath would be "/apply/pymol.cmd.color"
"""
if (self.command == "POST"):
self.fs = cgi.FieldStorage(fp=self.rfile, headers=self.headers,
environ = {'REQUEST_METHOD':'POST'},
keep_blank_values = 1)
self.urlpath = self.path
elif (self.command == "GET"):
scheme,netloc,path,params,qs,fragment = urlparse.urlparse(self.path)
self.fs = cgi.FieldStorage(environ = {'REQUEST_METHOD':'GET',
'QUERY_STRING':qs},
keep_blank_values = 1)
self.urlpath = path
else:
self.fs = None
def process_urlpath(self):
"""
self.urlpath can be a request for a document, or a
special request, such as apply or getattr
"""
parts = self.urlpath.split('/')
# for example:
# if http://localhost:8080/apply/pymol.cmd.color?...
# then parts is ['', 'apply', 'pymol.cmd.color...']
# or if http://localhost:8080/apply?_json=...
# then parts is ['', 'apply?_json=...']
if len(parts) < 2: # then it cannot be a PyMOL request
self.send_doc() # simple file retrieval
else: # might be a PyMOL request
if len(parts) == 2: # no m | ethod name or trailing slash -> blank
parts.append('')
if (parts[1] == 'apply'): # calling a method
self.pymol_apply(parts[2])
elif (parts[1] == 'getattr'): # retrieving a property
self.pymol_getattr(parts[2])
elif (parts[1] == 'echo'): # for debugging purposes
self.send_resp_header(200,'text/plain')
| self.echo_args(parts[2])
else: # simple file retrieval
self.send_doc()
def pymol_getattr(self, attr):
"""
apply the repr method to the requested attr, but only for
allowed attributes - those stored in the session dictionary
"""
key = '/getattr/' + attr;
if self.session.has_key(key):
try:
result = repr(self.session[key])
self.send_json_result(result)
except:
self.send_error(500,"Unable to get attribute.")
self.wfile.write(" %s\n" % attr)
traceback.print_exc(file=self.wfile)
else:
self.send_error(404,"Not a recognized attribute")
self.wfile.write(" %s is not a recognized attribute\n" % attr)
def wrap_return(self, result, status="OK", indent=None):
r = { 'status' : status, 'result' : result }
if self.server.wrap_natives==1:
return json.dumps(r,indent)
else:
return json.dumps(result,indent)
def send_json_result(self, result):
"""
send the mime header and result body. requests that came from
XMLHTTPRequest have specified they will accept (expect) json
formatted results. other requests will have come from
ordinary GET or POST requests via links or forms
"""
if self.callback != None:
self.send_resp_header(200,'text/javascript')
self.wfile.write("%s(%s)"%(self.callback,self.wrap_return(result)))
else:
accept_mime = self.headers.getheader('Accept')
if accept_mime in _json_mime_types:
self.send_resp_header(200,accept_mime)
self.wfile.write(self.wrap_return(result))
else:
self.send_resp_header(200,'text/html')
self.wfile.write("PyMOL's JSON response: <pre>")
self.wfile.write(self.wrap_return(result,indent=4))
self.wfile.write("</pre>")
def send_json_error(self, code, message):
if self.callback != None:
self.send_resp_header(code,'text/javascript')
self.wfile.write("%s(%s)"%(self.callback,self.wrap_return(message,"ERROR")))
else:
accept_mime = self.headers.getheader('Accept')
if accept_mime in _json_mime_types:
self.send_resp_header(code,accept_mime)
self.wfile.write(self.wrap_return(message,"ERROR"))
else:
self.send_resp_header(code,'text/html')
self.wfile.write("PyMOL's JSON response: <pre>")
self.wfile.write(self.wrap_return(message,"ERROR",indent=4))
self.wfile.write("</pre>")
def send_exception_json(self, code, message):
fp = StringIO.StringIO()
traceback.print_exc(file=fp)
tb = fp.getvalue()
message = message + tb.split('\n')
response = json.dumps(message)
if self.callback != None:
self.send_resp_header(code, 'text/javascript')
self.wfile.write("%s(%s)"%(self.callback,response))
else:
accept_mime = self.headers.getheader('Accept')
if accept_mime in _json_mime_types:
self.send_resp_header(code,accept_mime)
self.wfile.write(response)
else:
self.send_resp_header(code,'text/html')
self.wfile.write("PyMOL's JSON response: <pre>")
self.wfile.write(json.dumps(json.loads(response),indent=4))
|
coursera/courseraresearchexports | courseraresearchexports/containers/__init__.py | Python | apache-2.0 | 65 | 0 | __ | all__ = [
"clie | nt",
"utils"
]
from . import * # noqa
|
mmgen/mmgen | mmgen/color.py | Python | gpl-3.0 | 3,151 | 0.046652 | #!/usr/bin/env python3
#
# mmgen = Multi-Mode GENerator, command-line Bitcoin cold storage solution
# Copyright (C)2013-2022 The MMGen Project <mmgen@tuta.io>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
color.py: color handling for the MMGen suite
"""
_colors = {
'black': ( 232, (30,0) ),
'red': ( 210, (31,1) ),
'green': ( 121, (32,1) ),
'yellow': ( 229, (33,1) ),
'blue': ( 75, (34,1) ),
'magenta': ( 205, (35,1) ),
'cyan': ( 122, (36,1) ),
'pink': ( 218, (35,1) ),
'orange': ( 216, (31,1) ),
'gray': ( 246, (30,1) ),
'purple': ( 141, (35,1) ),
'brown': ( 208, (33,0) ),
'grn | dim': ( 108, (32,0) ),
'redbg': ( (232,210), (30,101) ),
'grnbg': ( (232,121), (30,102) ) | ,
'blubg': ( (232,75), (30,104) ),
'yelbg': ( (232,229), (30,103) ),
}
def nocolor(s):
return s
def set_vt100():
'hack to put term into VT100 mode under MSWin'
from .globalvars import g
if g.platform == 'win':
from subprocess import run
run([],shell=True)
def get_terminfo_colors(term=None):
from subprocess import run,PIPE
cmd = ['infocmp','-0']
if term:
cmd.append(term)
try:
cmdout = run(cmd,stdout=PIPE,check=True).stdout.decode()
except:
return None
else:
s = [e.split('#')[1] for e in cmdout.split(',') if e.startswith('colors')][0]
from .util import is_hex_str
if s.isdecimal():
return int(s)
elif s.startswith('0x') and is_hex_str(s[2:]):
return int(s[2:],16)
else:
return None
def init_color(num_colors='auto'):
assert num_colors in ('auto',8,16,256,0)
import mmgen.color as self
if num_colors == 'auto':
import os
t = os.getenv('TERM')
num_colors = 256 if (t and t.endswith('256color')) or get_terminfo_colors() == 256 else 16
reset = '\033[0m'
if num_colors == 0:
ncc = (lambda s: s).__code__
for c in _colors:
getattr(self,c).__code__ = ncc
elif num_colors == 256:
for c,e in _colors.items():
start = (
'\033[38;5;{};1m'.format(e[0]) if type(e[0]) == int else
'\033[38;5;{};48;5;{};1m'.format(*e[0]) )
getattr(self,c).__code__ = eval(f'(lambda s: "{start}" + s + "{reset}").__code__')
elif num_colors in (8,16):
for c,e in _colors.items():
start = (
'\033[{}m'.format(e[1][0]) if e[1][1] == 0 else
'\033[{};{}m'.format(*e[1]) )
getattr(self,c).__code__ = eval(f'(lambda s: "{start}" + s + "{reset}").__code__')
set_vt100()
for _c in _colors:
exec(f'{_c} = lambda s: s')
|
watchdogpolska/poradnia.siecobywatelska.pl | poradnia/events/views.py | Python | bsd-3-clause | 5,142 | 0.000583 | import locale
from atom.ext.guardian.views import RaisePermissionRequiredMixin
from braces.views import (
FormValidMessageMixin,
LoginRequiredMixin,
SelectRelatedMixin,
UserFormKwargsMixin,
)
from cached_property import cached_property
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils.encoding import force_text
from django.utils.html import mark_safe
from django.utils.timezone import now
from django.utils.translation import ugettext as _
from django.views.generic imp | ort (
ArchiveIndexView,
CreateView,
MonthArchiveView,
UpdateView,
)
from django.views.generic.list import BaseListView
from poradnia.cases.models import C | ase
from poradnia.keys.mixins import KeyAuthMixin
from poradnia.users.utils import PermissionMixin
from .forms import EventForm
from .models import Event
from .utils import EventCalendar
class EventCreateView(
RaisePermissionRequiredMixin, UserFormKwargsMixin, FormValidMessageMixin, CreateView
):
model = Event
form_class = EventForm
template_name = "events/form.html"
permission_required = ["cases.can_add_record"]
@cached_property
def case(self):
return get_object_or_404(Case, pk=self.kwargs["case_pk"])
def get_permission_object(self):
return self.case
def get_form_kwargs(self, *args, **kwargs):
kwargs = super().get_form_kwargs()
kwargs["case"] = self.case
return kwargs
def get_form_valid_message(self):
return _("Success added new event %(event)s") % ({"event": self.object})
class EventUpdateView(
RaisePermissionRequiredMixin, UserFormKwargsMixin, FormValidMessageMixin, UpdateView
):
model = Event
form_class = EventForm
template_name = "events/form.html"
permission_required = ["cases.can_add_record"]
def get_permission_object(self):
return self._object.case
@cached_property
def _object(self):
return super().get_object()
def get_object(self, *args, **kwargs):
return self._object
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["case"] = self.object.case
return kwargs
def form_valid(self, form):
self.object.reminder_set.all().update(active=False)
return super().form_valid(form)
def get_form_valid_message(self):
return _("Success updated event %(event)s") % {"event": self.object}
class CalendarListView(PermissionMixin, LoginRequiredMixin, ArchiveIndexView):
model = Event
date_field = "time"
allow_future = True
date_list_period = "month"
class CalendarEventView(
PermissionMixin, SelectRelatedMixin, LoginRequiredMixin, MonthArchiveView
):
model = Event
date_field = "time"
allow_future = True
select_related = ["case", "record"]
template_name = "events/calendar.html"
def get_language_code(self):
return getattr(self.request, "LANGUAGE_CODE", settings.LANGUAGE_CODE)
def get_user_locale(self):
if self.get_language_code() in locale.locale_alias:
name = locale.locale_alias[self.get_language_code()].split(".")[0]
return (name, "UTF-8")
else:
return locale.getlocale()
def get_calendar(self):
date = (int(self.get_year()), int(self.get_month()))
cal = EventCalendar(self.object_list).formatmonth(*date)
return mark_safe(cal)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["calendar"] = self.get_calendar()
return context
class ICalendarView(KeyAuthMixin, PermissionMixin, BaseListView):
window = 1
model = Event
def get_event(self, obj):
from icalendar import Event
event = Event()
event["uid"] = obj.pk
event["dtstart"] = obj.time
event["summary"] = force_text(obj)
event["description"] = obj.text
return event
def get_subcomponents(self):
return [self.get_event(x) for x in self.get_queryset()]
def get_icalendar(self):
from icalendar import Calendar
cal = Calendar()
cal["summary"] = "Events for {}".format(self.request.user)
cal["dtstart"] = self.get_start()
cal["dtend"] = self.get_end()
for component in self.get_subcomponents():
cal.add_component(component)
return cal
def get_start(self):
return now() + relativedelta(months=+self.window)
def get_end(self):
return now() + relativedelta(months=-self.window)
def get_queryset(self):
qs = super().get_queryset()
qs = qs.filter(time__lt=self.get_start())
qs = qs.filter(time__gt=self.get_end())
return qs
def render_to_response(self, *args, **kwargs):
response = HttpResponse(content_type="application/force-download")
response["Content-Disposition"] = "attachment; filename=calendar.ics"
response.write(self.get_icalendar().to_ical())
return response
|
kaji-project/shinken-mod-logstore-sqlite | module/module.py | Python | agpl-3.0 | 28,080 | 0.003098 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# import von modules/livestatus_logstore
"""
This class is for attaching a sqlite database to a livestatus broker module.
It is one possibility for an exchangeable storage for log broks
"""
import os
import sys
import time
import datetime
import re
from shinken.objects.service import Service
from shinken.log import logger
from shinken.modulesctx import modulesctx
livestatus_broker = modulesctx.get_module('livestatus')
# Import a class from the livestatus module, should be already loaded!
#from shinken.modules.livestatus import module as livestatus_broker
LiveStatusStack = livestatus_broker.LiveStatusStack
LOGCLASS_INVALID = livestatus_broker.LOGCLASS_INVALID
Logline = livestatus_broker.Logline
old_implementation = False
try:
import sqlite3
except ImportError: # python 2.4 do not have it
try:
import pysqlite2.dbapi2 as sqlite3 # but need the pysqlite2 install from http://code.google.com/p/pysqlite/downloads/list
except ImportError: # python 2.4 do not have it
import sqlite as sqlite3 # one last try
old_implementation = True
from shinken.basemodule import BaseModule
from shinken.objects.module import Module
properties = {
'daemons': ['livestatus'],
'type': 'logstore_sqlite',
'external': False,
'phases': ['running'],
}
# called by the plugin manager
def get_instance(plugin):
logger.info("[Logstore SQLite] Get an LogStore Sqlite module for plugin %s" % plugin.get_name())
instance = LiveStatusLogStoreSqlite(plugin)
return instance
def row_factory(cursor, row):
"""Handler for the sqlite fetch method."""
return Logline(sqlite_cursor=cursor.description, sqlite_row=row)
class LiveStatusLogStoreError(Exception):
pass
class LiveStatusLogStoreSqlite(BaseModule):
def __init__(self, modconf):
BaseModule.__init__(self, modconf)
self.plugins = []
# Change. The var folder is not defined based upon '.', but upon ../var from the process name (shinken-broker)
# When the database_file variable, the default variable was calculated from '.'... Depending on where you were
# when you ran the command the behavior changed.
self.database_file = getattr(modconf, 'database_file', os.path.join(os.path.abspath('.'), 'livestatus.db'))
self.archive_path = getattr(modconf, 'archive_path', os.path.join(os.path.dirname(self.database_file), 'archives'))
try:
os.stat(self.archive_path)
except:
os.mkdir(self.archive_path)
max_logs_age = getattr(modconf, 'max_logs_age', '365')
maxmatch = re.match(r'^(\d+)([dwmy]*)$', max_logs_age)
if maxmatch is None:
logger.warning("[Logstore SQLite] Warning: wrong format for max_logs_age. Must be <number>[d|w|m|y] or <number> and not %s" % max_logs_age)
return None
else:
if not maxmatch.group(2):
self.max_logs_age = int(maxmatch.group(1))
elif maxmatch.group(2) == 'd':
self.max_logs_age = int(maxmatch.group(1))
elif maxmatch.group(2) == 'w':
self.max_logs_age = int(maxmatch.group(1)) * 7
elif maxmatch.group(2) == 'm':
self.max_logs_age = int(maxmatch.group(1)) * 31
elif maxmatch.group(2) == 'y':
self.max_logs_age = int(maxmatch.group(1)) * 365
self.use_aggressive_sql = (getattr(modconf, 'use_aggressive_sql', '0') == '1')
self.read_only = (getattr(modconf, 'read_only', '0') == '1')
# This st | ack is used to create a full-blown select-statement
self.sql_filter_stack = LiveStatusSqlStack()
# This stack is used to create a minimal select-statement which
# selects only by time >= and time <=
self.sql_time_filter_stack = LiveStatusSqlStack()
# Now sleep one second, so that won't get lineno collisions with the last second
time.sleep(1)
Logline.lineno = 0 |
def load(self, app):
self.app = app
def init(self):
self.old_implementation = old_implementation
def open(self):
logger.info("[Logstore SQLite] Open LiveStatusLogStoreSqlite ok : %s" % self.database_file)
self.dbconn = sqlite3.connect(self.database_file, check_same_thread=False)
# Get no problem for utf8 insert
self.dbconn.text_factory = str
self.dbcursor = self.dbconn.cursor()
#self.dbconn.row_factory = row_factory
#self.execute("PRAGMA cache_size = 200000")
# Create db file and tables if not existing
self.prepare_log_db_table()
# Start with commit and rotate immediately so the interval timers
# get initialized properly
now = time.time()
self.next_log_db_commit = now
self.next_log_db_rotate = now
# Immediately archive data. This also splits old-style (storing logs
# from more than one day) up into many single-day databases
if self.max_logs_age > 0:
# open() is also called from log_db_do_archive (with max_logs_age
# of 0 though)
self.log_db_do_archive()
def close(self):
self.dbconn.commit()
self.dbconn.close()
self.dbconn = None
if self.max_logs_age == 0:
# Again, if max_logs_age is 0, we don't care for archives.
# If max_logs_age was manually set to 0, we know that we don't
# want archives. If it was set by log_db_do_archive(), we don't
# want to leave empty directories around.
try:
os.removedirs(self.archive_path)
except:
pass
def prepare_log_db_table(self):
if self.read_only:
return
# 'attempt', 'class', 'command_name', 'comment', 'contact_name', 'host_name', 'lineno', 'message',
# 'plugin_output', 'service_description', 'state', 'state_type', 'time', 'type',
cmd = "CREATE TABLE IF NOT EXISTS logs(logobject INT, attempt INT, class INT, command_name VARCHAR(64), comment VARCHAR(256), contact_name VARCHAR(64), host_name VARCHAR(64), lineno INT, message VARCHAR(512), options VARCHAR(512), plugin_output VARCHAR(256), service_description VARCHAR(64), state INT, state_type VARCHAR(10), time INT, type VARCHAR(64))"
self.execute(cmd)
cmd = "CREATE INDEX IF NOT EXISTS logs_time ON logs (time)"
self.execute(cmd)
cmd = "CREATE INDEX IF NOT EXISTS logs_host_name ON logs (host_name)"
self.execute(cmd)
cmd = "PRAGMA journal_mode=truncate"
self.execute(cmd)
self.commit()
def commit_and_rotate_log_db(self):
"""Submit a commit or rotate the complete database file.
This function is called whenever the mainloop doesn't handle a request.
The database updates are committed every second.
Every day at 00:05 the database contents with a timestamp of past days
are moved to their own datafiles (one for each day). We wait until 00:05
because in a distributed environment even after 00:00 (on the broker host)
we might receive data from other hosts with a timestamp dating from yesterday.
"""
if self.read_only:
|
noironetworks/aci-integration-module | aim/db/hashtree_db_listener.py | Python | apache-2.0 | 18,457 | 0 | # Copyright (c) 2016 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import traceback
from oslo_log import log as logging
from oslo_utils import importutils
from aim.api import resource
from aim.api import status as api_status
from aim.api import tree as aim_tree
from aim.common.hashtree import exceptions as hexc
from aim.common.hashtree import structured_tree as htree
from aim.common import utils
from aim import config as aim_cfg
from aim import tree_manager
ACTION_LOG_THRESHOLD = 1000
MAX_EVENTS_PER_ROOT = 10000
LOG = logging.getLogger(__name__)
# Not really rootless, they just miss the root reference attributes
ROOTLESS_TYPES = ['fabricTopology']
class HashTreeDbListener(object):
"""Updates persistent hash-tree in response to DB updates."""
def __init__(self, aim_manager):
self.aim_manager = aim_manager
self.tt_mgr = tree_manager.HashTreeManager()
self.tt_maker = tree_manager.AimHashTreeMaker()
self.tt_builder = tree_manager.HashTreeBuilder(self.aim_manager)
def on_commit(self, store, added, updated, deleted):
# Query hash-tree for each tenant and modify the tree based on DB
# updates
# TODO(ivar): Use proper store context once dependency issue is fixed
ctx = utils.FakeContext(store=store)
resetting_roots = set()
with ctx.store.begin(subtransactions=True):
for i, resources in enumerate((added + updated, deleted)):
for res in resources:
try:
root = res.root
except AttributeError:
continue
if i == 0 and getattr(res, 'sync', True):
action = aim_tree.ActionLog.CREATE
else:
action = aim_tree.ActionLog.DELETE
# TODO(ivar): root should never be None for any object!
# We have some conversions broken
if self._get_reset_count(ctx, root) > 0:
resetting_roots.add(root)
if not root or root in resetting_roots:
continue
if self._get_log_count(ctx, root) >= MAX_EVENTS_PER_ROOT:
LOG.warn('Max events per root %s reached, '
'requesting a reset' % root)
action = aim_tree.ActionLog.RESET
log = aim_tree.ActionLog(
root_rn=root, action=action,
object_dict=utils.json_dumps(res.__dict__),
object_type=type(res).__name__)
self.aim_manager.create(ctx, log)
def _get_log_count(self, ctx, root):
return self.aim_manager.count(ctx, aim_tree.ActionLog, root_rn=root)
def _get_reset_count(self, ctx, root):
return self.aim_manager.count(ctx, aim_tree.ActionLog, root_rn=root,
action=aim_tree.ActionLog.RESET)
def _delete_trees(self, aim_ctx, root=None):
with aim_ctx.store.begin(subtransactions=True):
# Delete existing trees
if root:
self.tt_mgr.clean_by_root_rn(aim_ctx, root)
else:
self.tt_mgr.clean_all(aim_ctx)
def _recreate_trees(self, aim_ctx, root=None):
with aim_ctx.store.begin(subtransactions=True):
cache = {}
log_by_root = {}
# Delete existing trees
if root:
type, name = self.tt_mgr.root_key_funct(root)[0].split('|')
# Retrieve objects
for klass in self.aim_manager.aim_resources:
if issubclass(klass, resource.AciResourceBase):
filters = {}
if root:
if self._retrieve_class_root_type(
klass, cache=cache) != type:
# Not the right subtree
continue
if type not in ROOTLESS_TYPES:
filters[klass.root_ref_attribute()] = name
# Get all objects of that type
for obj in self.aim_manager.find(aim_ctx, klass,
**filters):
# We will not add this SG rule to AIM tree to
# prevent it from showing up in APIC because its
# a block-all rule.
if (aim_cfg.CONF.aim.
remove_remote_group_sg_rule_if_block_all and
klass == resource.SecurityG | roupRule and
obj.remote_group_id and
not obj.remote_ips):
continue
# Need a | ll the faults and statuses as well
stat = self.aim_manager.get_status(
aim_ctx, obj, create_if_absent=False)
if getattr(obj, 'sync', True):
if stat:
log_by_root.setdefault(obj.root, []).append(
(aim_tree.ActionLog.CREATE, stat, None))
for f in stat.faults:
log_by_root.setdefault(
obj.root, []).append(
(aim_tree.ActionLog.CREATE, f, None))
del stat.faults
log_by_root.setdefault(obj.root, []).append(
(aim_tree.ActionLog.CREATE, obj, None))
# Reset the trees
self._push_changes_to_trees(aim_ctx, log_by_root,
delete_logs=False, check_reset=False)
def cleanup_zombie_status_objects(self, aim_ctx, roots=None):
with aim_ctx.store.begin(subtransactions=True):
# Retrieve objects
klass = api_status.AciStatus
filters = {}
if roots is not None:
filters['in_'] = {'resource_root': roots}
to_delete = []
for stat in self.aim_manager.find(aim_ctx, klass, **filters):
parent = self.aim_manager.get_by_id(
aim_ctx, stat.parent_class, stat.resource_id)
if not parent or parent.root != stat.resource_root:
to_delete.append(stat.id)
if to_delete:
LOG.info("Deleting parentless status objects "
"%s" % to_delete)
self.aim_manager.delete_all(
aim_ctx, klass, in_={'id': to_delete})
def reset(self, store, root=None):
aim_ctx = utils.FakeContext(store=store)
with aim_ctx.store.begin(subtransactions=True):
self.cleanup_zombie_status_objects(aim_ctx, roots=[root])
self._delete_trees(aim_ctx, root=root)
self._recreate_trees(aim_ctx, root=root)
def _retrieve_class_root_type(self, klass, cache=None):
cache = cache if cache is not None else {}
if klass in cache:
return cache[klass]
stack = [klass]
while klass._tree_parent:
klass = klass._tree_parent
stack.append(klass)
for k in stack:
cache[k] = klass._aci_mo_name
return cache[klass]
def catch_up_with_action_log(self, store, served_tenants=None):
served_tenants = serve |
wholland/env | vim/runtime/bundle/ultisnips/plugin/UltiSnips/_diff.py | Python | mit | 7,737 | 0.010857 | #!/usr/bin/env python
# encoding: utf-8
from collections import defaultdict
import sys
from UltiSnips import _vim
from UltiSnips.geometry import Position
def is_complete_edit(initial_line, a, b, cmds):
buf = a[:]
for cmd in cmds:
ctype, line, col, char = cmd
line -= initial_line
if ctype == "D":
if char != '\n':
buf[line] = buf[line][:col] + buf[line][col+len(char):]
else:
if line + 1 < len(buf):
buf[line] = buf[line] + buf[line+1]
del buf[line+1]
else:
del buf[line]
elif ctype == "I":
buf[line] = buf[line][:col] + char + buf[line][col:]
buf = '\n'.join(buf).split('\n')
return len(buf) == len(b) and all(j==k for j,k in zip(buf, b))
def guess_edit(initial_line, lt, ct, vs):
    """
    Try to guess what the user might have done by heuristically looking at cursor movement,
    number of changed lines and if they got longer or shorter. This will detect most simple
    movements like insertion, deletion of a line or carriage return.

    :param initial_line: absolute line number where the tracked text starts
    :param lt: last known text, as a list of lines
    :param ct: current text, as a list of lines
    :param vs: vim state object exposing .pos / .ppos cursor positions
    :returns: (True, edit_script) when a guess reproduces ct from lt,
        otherwise (False, None). Each guess is validated with
        is_complete_edit() before being returned.
    """
    if not len(lt) and not len(ct): return True, ()
    pos = vs.pos
    ppos = vs.ppos
    if len(lt) and (not ct or (len(ct) == 1 and not ct[0])): # All text deleted?
        es = []
        if not ct: ct = ['']
        for i in lt:
            es.append(("D", initial_line, 0, i))
            es.append(("D", initial_line, 0, "\n"))
        es.pop() # Remove final \n because it is not really removed
        if is_complete_edit(initial_line, lt, ct, es): return True, es
    if ppos.mode == 'v': # Maybe selectmode?
        sv = list(map(int, _vim.eval("""getpos("'<")"""))); sv = Position(sv[1]-1,sv[2]-1)
        ev = list(map(int, _vim.eval("""getpos("'>")"""))); ev = Position(ev[1]-1,ev[2]-1)
        if "exclusive" in _vim.eval("&selection"):
            ppos.col -= 1 # We want to be inclusive, sorry.
            ev.col -= 1
        es = []
        if sv.line == ev.line:
            # Selection replaced within a single line: delete the selected
            # span, then insert whatever now sits between sv and the cursor.
            es.append(("D", sv.line, sv.col, lt[sv.line - initial_line][sv.col:ev.col+1]))
            if sv != pos and sv.line == pos.line:
                es.append(("I", sv.line, sv.col, ct[sv.line - initial_line][sv.col:pos.col+1]))
        if is_complete_edit(initial_line, lt, ct, es): return True, es
    if pos.line == ppos.line:
        if len(lt) == len(ct): # Movement only in one line
            llen = len(lt[ppos.line - initial_line])
            clen = len(ct[pos.line - initial_line])
            if ppos < pos and clen > llen: # Likely that only characters have been added
                es = (
                    ("I", ppos.line, ppos.col, ct[ppos.line - initial_line][ppos.col:pos.col]),
                )
                if is_complete_edit(initial_line, lt, ct, es): return True, es
            if clen < llen:
                if ppos == pos: # 'x' or DEL or dt or something
                    es = (
                        ("D", pos.line, pos.col, lt[ppos.line - initial_line][ppos.col:ppos.col + (llen - clen)]),
                    )
                    if is_complete_edit(initial_line, lt, ct, es): return True, es
                if pos < ppos: # Backspacing or dT dF?
                    es = (
                        ("D", pos.line, pos.col, lt[pos.line - initial_line][pos.col:pos.col + llen - clen]),
                    )
                    if is_complete_edit(initial_line, lt, ct, es): return True, es
        elif len(ct) < len(lt): # Maybe some lines were deleted? (dd or so)
            es = []
            for i in range(len(lt)-len(ct)):
                es.append( ("D", pos.line, 0, lt[pos.line - initial_line + i]))
                es.append( ("D", pos.line, 0, '\n'))
            if is_complete_edit(initial_line, lt, ct, es): return True, es
    else: # Movement in more than one line
        if ppos.line + 1 == pos.line and pos.col == 0: # Carriage return?
            es = (("I", ppos.line, ppos.col, "\n"),)
            if is_complete_edit(initial_line, lt, ct, es): return True, es
    return False, None
def diff(a, b, sline = 0):
    """
    Return a list of deletions and insertions that will turn a into b. This is
    done by traversing an implicit edit graph and searching for the shortest
    route. The basic idea is as follows:

        - Matching a character is free as long as there was no deletion/insertion
          before. Then, matching will be seen as delete + insert [1].
        - Deleting one character has the same cost everywhere. Each additional
          character costs only half of the first deletion.
        - Insertion is cheaper the earlier it happens. The first character is more
          expensive than any later [2].

    [1] This is that world -> aolsa will be "D" world + "I" aolsa instead of
        "D" w , "D" rld, "I" a, "I" lsa
    [2] This is that "hello\n\n" -> "hello\n\n\n" will insert a newline after hello
        and not after \n

    :param sline: line number assigned to the first line of the text.

    The search is a Dijkstra-style expansion over (x, y) positions in a and b;
    `d` maps a cost to the frontier states at that cost, `seen` records the
    best cost per position. Each state is (x, y, line, col, edit_script).
    """
    d = defaultdict(list)
    seen = defaultdict(lambda: sys.maxsize)

    d[0] = [ (0,0,sline, 0, ()) ]
    cost = 0
    D_COST = len(a)+len(b)
    I_COST = len(a)+len(b)
    while True:
        while len(d[cost]):
            x, y, line, col, what = d[cost].pop()

            # Both remainders equal: the accumulated script is complete.
            if a[x:] == b[y:]:
                return what

            if x < len(a) and y < len(b) and a[x] == b[y]:
                ncol = col + 1
                nline = line
                if a[x] == '\n':
                    ncol = 0
                    nline +=1
                lcost = cost + 1
                if (what and what[-1][0] == "D" and what[-1][1] == line and
                    what[-1][2] == col and a[x] != '\n'):
                    # Matching directly after a deletion should be as costly as
                    # DELETE + INSERT + a bit
                    lcost = (D_COST + I_COST)*1.5
                if seen[x+1,y+1] > lcost:
                    d[lcost].append((x+1,y+1, nline, ncol, what))
                    seen[x+1,y+1] = lcost
            if y < len(b): # INSERT
                ncol = col + 1
                nline = line
                if b[y] == '\n':
                    ncol = 0
                    nline += 1
                # Extending the previous insertion is cheaper than starting
                # a new one; both branches keep `seen` consistent.
                if (what and what[-1][0] == "I" and what[-1][1] == nline and
                    what[-1][2]+len(what[-1][-1]) == col and b[y] != '\n' and
                    seen[x,y+1] > cost + (I_COST + ncol) // 2
                ):
                    seen[x,y+1] = cost + (I_COST + ncol) // 2
                    d[cost + (I_COST + ncol) // 2].append(
                        (x,y+1, line, ncol, what[:-1] + (
                            ("I", what[-1][1], what[-1][2], what[-1][-1] + b[y]),) )
                    )
                elif seen[x,y+1] > cost + I_COST + ncol:
                    seen[x,y+1] = cost + I_COST + ncol
                    d[cost + ncol + I_COST].append((x,y+1, nline, ncol,
                        what + (("I", line, col,b[y]),))
                    )
            if x < len(a): # DELETE
                # Same idea as INSERT: extending a previous deletion costs
                # half of starting a fresh one.
                if (what and what[-1][0] == "D" and what[-1][1] == line and
                    what[-1][2] == col and a[x] != '\n' and what[-1][-1] != '\n' and
                    seen[x+1,y] > cost + D_COST // 2
                ):
                    seen[x+1,y] = cost + D_COST // 2
                    d[cost + D_COST // 2].append((x+1,y, line, col, what[:-1] +
                        (("D",line, col, what[-1][-1] + a[x]),) )
                    )
                elif seen[x+1,y] > cost + D_COST:
                    seen[x+1,y] = cost + D_COST
                    d[cost + D_COST].append((x+1,y, line, col, what +
                        (("D",line, col, a[x]),) )
                    )
        cost += 1
|
home-assistant/home-assistant | tests/components/wemo/conftest.py | Python | apache-2.0 | 3,246 | 0 | """Fixtures for pywemo."""
import asyncio
from unittest.mock import create_autospec, patch
import pytest
import pywemo
from homeassistant.components.wemo import CONF_DISCOVERY, CONF_STATIC
from homeassistant.components.wemo.const import DOMAIN
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
# Connection details and identity values shared by the mocked WeMo device
# fixtures below.
MOCK_HOST = "127.0.0.1"
MOCK_PORT = 50000
MOCK_NAME = "WemoDeviceName"
MOCK_SERIAL_NUMBER = "WemoSerialNumber"
MOCK_FIRMWARE_VERSION = "WeMo_WW_2.00.XXXXX.PVT-OWRT"
@pytest.fixture(name="pywemo_model")
def pywemo_model_fixture():
    """Name of the pywemo device class mocked by pywemo_device_fixture."""
    model_name = "LightSwitch"
    return model_name
@pytest.fixture(name="pywemo_registry", autouse=True)
async def async_pywemo_registry_fixture():
    """Fixture for SubscriptionRegistry instances.

    Replaces pywemo.SubscriptionRegistry with an autospec mock whose
    callbacks dict records per-device subscription callbacks; the semaphore
    is released once per registered callback so tests can wait for it.
    """
    registry = create_autospec(pywemo.SubscriptionRegistry, instance=True)

    registry.callbacks = {}
    registry.semaphore = asyncio.Semaphore(value=0)

    def on_func(device, type_filter, callback):
        # Remember the callback by device name and signal the waiter.
        registry.callbacks[device.name] = callback
        registry.semaphore.release()

    registry.on.side_effect = on_func
    registry.is_subscribed.return_value = False

    with patch("pywemo.SubscriptionRegistry", return_value=registry):
        yield registry
@pytest.fixture(name="pywemo_discovery_responder", autouse=True)
def pywemo_discovery_responder_fixture():
    """Fixture for the DiscoveryResponder instance.

    Autospec-patches pywemo's SSDP DiscoveryResponder for the whole test so
    no real network discovery responder is started.
    """
    with patch("pywemo.ssdp.DiscoveryResponder", autospec=True):
        yield
@pytest.fixture(name="pywemo_device")
def pywemo_device_fixture(pywemo_registry, pywemo_model):
    """Fixture for WeMoDevice instances.

    Builds an autospec mock of the pywemo class named by *pywemo_model*,
    fills in the MOCK_* identity values, and patches pywemo's discovery
    entry points so setup code receives this mock.
    """
    cls = getattr(pywemo, pywemo_model)
    device = create_autospec(cls, instance=True)
    device.host = MOCK_HOST
    device.port = MOCK_PORT
    device.name = MOCK_NAME
    device.serialnumber = MOCK_SERIAL_NUMBER
    # "LongPress" variants report the plain model name.
    device.model_name = pywemo_model.replace("LongPress", "")
    device.udn = f"uuid:{device.model_name}-1_0-{device.serialnumber}"
    device.firmware_version = MOCK_FIRMWARE_VERSION
    device.get_state.return_value = 0  # Default to Off
    device.supports_long_press.return_value = cls.supports_long_press()

    url = f"http://{MOCK_HOST}:{MOCK_PORT}/setup.xml"
    with patch("pywemo.setup_url_for_address", return_value=url), patch(
        "pywemo.discovery.device_from_description", return_value=device
    ):
        yield device
@pytest.fixture(name="wemo_entity_suffix")
def wemo_entity_suffix_fixture():
    """Entity-id suffix used by wemo_entity to pick a specific entity."""
    suffix = ""
    return suffix
@pytest.fixture(name="w | emo_entity")
async def async_wemo_entity_fixture(hass, pywemo_device, wemo_entity_suffix):
"""Fixture for a Wemo entity in hass."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_DISCOVERY: False,
CONF_STATIC: [f"{MOCK_HOST}:{MOCK_PORT}"],
},
},
)
await hass.async_block_till_done()
entity_registry = er.async_get(hass)
for entry in entity_registry.entities.values():
if entry.entity_id.endswith(wemo_entity_suffix):
return entry
return None
|
jad-b/garden | garden_test/testfile.py | Python | mit | 42 | 0 | #!/usr/bin/ | env python3
# Package version identifier.
version = '0.1.2'
cvxgrp/ncvx | ncvx/orthog.py | Python | gpl-3.0 | 1,674 | 0 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from .noncvx_variable import NonCvxVariable
import cvxpy as cp
import numpy as np
class Orthog(NonCvxVariable):
    """ A variable satisfying X^TX = I. """

    def __init__(self, size, *args, **kwargs):
        # Orthogonal variables are square, size x size.
        super().__init__(shape=(size, size), *args, **kwargs)

    def init_z(self, random):
        """Initializes the value of the replicant variable.

        The `random` flag is accepted for interface compatibility but the
        replicant is always initialized to zeros.
        """
        self.z.value = np.zeros(self.shape)

    def _project(self, matrix):
        """Project onto the orthogonal matrices by setting every singular
        value to one.

        For M = U diag(s) V, the nearest orthogonal matrix is U V (the
        unitary factor of the polar decomposition).
        """
        U, s, V = np.linalg.svd(matrix)
        s[:] = 1
        return U.dot(np.diag(s)).dot(V)

    # Constrain all entries to be the value in the matrix.
    def _restrict(self, matrix):
        return [self == matrix]

    def relax(self):
        """Relaxation [I X; X^T I] is PSD.
        """
        rows, cols = self.shape
        constr = super(Orthog, self).relax()
        # BUG FIX: the original referenced an undefined name `X`; the
        # variable being relaxed is `self`.
        mat = cp.bmat([[np.eye(rows), self], [self.T, np.eye(cols)]])
        return constr + [mat >> 0]
|
sonntagsgesicht/regtest | .aux/venv/lib/python3.9/site-packages/auxilium/tools/git_tools.py | Python | apache-2.0 | 3,958 | 0 | # -*- coding: utf-8 -*-
# auxilium
# --------
# Python project for an automated test and deploy toolkit.
#
# Author: sonntagsgesicht
# Version: 0.1.9, copyright Saturday, 02 October 2021
# Website: https://github.com/sonntagsgesicht/auxilium
# License: Apache License 2.0 (see LICENSE file)
from logging import log, DEBUG, INFO, ERROR
from os import getcwd, chdir
from os.path import exists, join
from dulwich import porcelain
from dulwich.repo import Repo
from .const import ICONS
from .setup_tools import EXT
BRANCH = 'master'
def commit_git(msg='', path=getcwd()):
    """add and commit changes to local `git` repo

    Stages all modified and untracked files in *path* (initializing a repo
    there if needed), logs the per-file status, and commits with *msg*
    (suffixed by EXT). Returns 0 on success or no-op, 1 on commit failure.

    NOTE: the default for *path* is evaluated at import time (the cwd when
    the module was first imported), which is the existing interface.
    """
    cwd = getcwd()
    chdir(path)
    repo = Repo(path) if exists(join(path, '.git')) else Repo.init(path)
    _, files, untracked = porcelain.status(repo)
    repo.stage(files)
    repo.stage(untracked)
    # added, ignored = porcelain.add(repo)
    staged, un_staged, untracked = porcelain.status(repo, False)
    if not any(staged.values()):
        # BUG FIX: message said "not files found"; should be "no files found".
        log(INFO, ICONS["missing"] + "no files found - did not commit")
        log(DEBUG, ICONS[""] + "at " + path)
        chdir(cwd)
        return 0
    log(INFO, ICONS["status"] + "file status in `git` repo")
    log(DEBUG, ICONS[""] + "at " + path)
    if staged['add']:
        log(INFO, ICONS[""] + "add:")
        for p in staged['add']:
            log(INFO, ICONS[""] + " %s" % p.decode())
    if staged['modify']:
        log(INFO, ICONS[""] + "modify:")
        for p in staged['modify']:
            log(INFO, ICONS[""] + " %s" % p.decode())
    if staged['delete']:
        log(INFO, ICONS[""] + "delete:")
        for p in staged['delete']:
            log(INFO, ICONS[""] + " %s" % p.decode())
    for p in un_staged:
        log(INFO, ICONS[""] + "unstaged: %s" % p.decode())
    for p in untracked:
        log(INFO, ICONS[""] + "untracked : %s" % p)
    msg = msg if msg else 'Commit'
    msg += EXT
    log(INFO, ICONS["commit"] + "commit changes as `%s`" % msg)
    log(DEBUG, ICONS[""] + "at " + path)
    try:
        res = porcelain.commit(repo, msg)
        log(DEBUG, ICONS[""] + "as %s" % res.decode())
    except Exception as e:
        log(ERROR, ICONS['error'] + str(e))
        chdir(cwd)
        return 1
    chdir(cwd)
    return 0
def tag_git(tag, msg='', path=getcwd()):
    """tag current branch of local `git` repo

    Returns 0 on success, 1 if the tag already exists or tag creation
    raised. The default *path* is the cwd captured at import time.
    """
    log(INFO, ICONS["tag"] + "tag current branch as %s" % tag)
    log(DEBUG, ICONS[""] + "at " + path)
    # dulwich returns tag names as bytes, so compare the encoded form.
    tag_list = porcelain.tag_list(Repo(path))
    if bytearray(tag.encode()) in tag_list:
        log(ERROR, ICONS["error"] +
            "tag %s exists in current branch of local `git` repo" % tag)
        return 1
    if msg:
        log(DEBUG, ICONS[""] + "msg: `%s`" % msg)
    try:
        porcelain.tag_create(Repo(path), tag, message=msg)
    except Exception as e:
        log(ERROR, ICONS['error'] + str(e))
        return 1
    return 0
def build_url(url, usr='', pwd='None'):  # nosec
    """Embed credentials into an https URL for use as a git remote.

    A password of '' or the literal string 'None' counts as absent. When a
    password is supplied without a user name, the placeholder 'token-user'
    is used so token authentication still works.
    """
    if pwd and pwd != 'None':
        auth_pwd = ':' + str(pwd)
    else:
        auth_pwd = ''
    if usr:
        auth_usr = str(usr)
    elif auth_pwd:
        auth_usr = 'token-user'
    else:
        auth_usr = ''
    bare_url = url.replace('https://', '')
    return 'https://' + auth_usr + auth_pwd + '@' + bare_url
def clean_url(url):
    """Strip the password from a 'scheme://user:pwd@host/...' URL, keeping
    only the user name in the credential part."""
    scheme, remainder = url.split('//', 1)
    credentials, host_part = remainder.split('@', 1)
    # Keep only the user name; drop everything after the first colon.
    user = credentials.split(':', 1)[0]
    return scheme + '//' + user + '@' + host_part
class Buffer(list):
    """Minimal file-like sink: every chunk written is collected in the list.

    Used as the out/err stream argument to dulwich's porcelain calls.
    """

    def write(self, chunk):
        self.append(chunk)
def push_git(remote='None', branch=BRANCH, path=getcwd()):
    """push current branch of local to remote `git` repo

    :param remote: remote URL (may contain credentials; logged scrubbed).
    :param branch: branch to push, defaults to module-level BRANCH.
    :param path: local repo path; default is cwd captured at import time.
    :returns: 0 on success, 1 when the push raised.
    """
    log(INFO, ICONS["push"] + "push current branch to remote `git` repo")
    # Scrub the password out of the URL before logging it.
    log(DEBUG, ICONS[""] + "at " + clean_url(remote))
    out = Buffer()
    try:
        porcelain.push(Repo(path), remote, branch, out, out)
    except Exception as e:
        log(ERROR, ICONS['error'] + str(e))
        return 1
    for line in out:
        log(INFO, ICONS[""] + line.decode().strip())
    return 0
|
ohmu/pghoard | pghoard/archive_sync.py | Python | apache-2.0 | 13,251 | 0.003773 | """
pghoard: sync local WAL files to remote archive
Copyright (c) 2016 Ohmu Ltd
See LICENSE for details
"""
import argparse
import hashlib
import logging
import os
import sys
import requests
from pghoard.common import get_pg_wal_directory
from . import config, logutil, version, wal
from .rohmu.errors import InvalidConfigurationError
class SyncError(Exception):
    """Raised when the WAL archive cannot be brought to a consistent state."""
    pass
class ArchiveSync:
"""Iterate over WAL directory in reverse alphanumeric order and upload
files to object storage until we find a file that already exists there.
This can be used after a failover has happened to make sure the archive
has no gaps in case the previous master failed before archiving its
final segment."""
def __init__(self):
    """Initialize empty state; call set_config() before using the instance."""
    self.log = logging.getLogger(self.__class__.__name__)
    self.config = None
    self.site = None
    self.backup_site = None
    self.base_url = None
def set_config(self, config_file, site):
    """Load the pghoard JSON config and select the backup site to use."""
    self.config = config.read_json_config_file(config_file, check_commands=False)
    self.site = config.get_site_from_config(self.config, site)
    self.backup_site = self.config["backup_sites"][self.site]
    # Local pghoard webserver endpoint for this site.
    self.base_url = "http://127.0.0.1:{}/{}".format(self.config["http_port"], self.site)
def get_current_wal_file(self):
    """Return the WAL segment currently open on the first configured node."""
    # identify the (must be) local database
    return wal.get_current_wal_file(self.backup_site["nodes"][0])
def get_first_required_wal_segment(self):
    """Return (start_wal_segment, pg_version) of the latest basebackup.

    Returns (None, None) when the basebackup listing fails or is empty.
    """
    resp = requests.get("{base}/basebackup".format(base=self.base_url))
    if resp.status_code != 200:
        self.log.error("Error looking up basebackups")
        return None, None
    items = resp.json()["basebackups"]
    if not items:
        self.log.error("Unable to find any basebackups")
        return None, None
    # NOTE: select latest basebackup by name, not necessarily by latest
    # wal segment as we'll anyway try to restore the latest basebackup
    # *by name*.
    latest_basebackup = max(items, key=lambda item: item["name"])
    pg_version = latest_basebackup["metadata"].get("pg-version")
    return latest_basebackup["metadata"]["start-wal-segment"], pg_version
def archive_sync(self, verify, new_backup_on_failure, max_hash_checks):
    """Upload missing local WAL files, then optionally verify the archive.

    :param verify: when falsy, skip the integrity check and return None.
    :param new_backup_on_failure: forwarded to check_wal_archive_integrity.
    :param max_hash_checks: cap on remote/local hash comparisons
        (a negative value means unlimited).
    """
    self.check_and_upload_missing_local_files(max_hash_checks)
    if not verify:
        return None
    return self.check_wal_archive_integrity(new_backup_on_failure)
def check_and_upload_missing_local_files(self, max_hash_checks):
    """Scan the local WAL directory and upload any segment or timeline
    file that is missing (or hash-mismatched) in the remote archive.

    :param max_hash_checks: maximum number of WAL files whose remote hash
        is compared against the local file; negative means unlimited.
    """
    current_wal_file = self.get_current_wal_file()
    first_required_wal_file, _ = self.get_first_required_wal_segment()

    # Find relevant WAL files. We do this by checking archival status
    # of all WAL files older than the one currently open (ie reverse
    # sorted list from newest file that should've been archived to the
    # oldest on disk) and and appending missing files to a list. After
    # collecting a list we start archiving them from oldest to newest.
    # This is done so we don't break our missing archive detection logic
    # if sync is interrupted for some reason.

    # Sort all timeline files first to make sure they're always
    # archived, otherwise the timeline files are processed only after
    # all WAL files for a given timeline have been handled.
    wal_dir = get_pg_wal_directory(self.backup_site)
    wal_files = os.listdir(wal_dir)
    wal_files.sort(key=lambda f: (f.endswith(".history"), f), reverse=True)
    need_archival = []
    hash_checks_done = 0
    existing_wal_without_checksum_count = 0
    for wal_file in wal_files:
        archive_type = None
        if wal.TIMELINE_RE.match(wal_file):
            # We want all timeline files
            archive_type = "TIMELINE"
        elif not wal.WAL_RE.match(wal_file):
            pass  # not a WAL or timeline file
        elif wal_file == current_wal_file:
            self.log.info("Skipping currently open WAL file %r", wal_file)
        elif wal_file > current_wal_file:
            self.log.debug("Skipping recycled WAL file %r", wal_file)
        elif first_required_wal_file is not None and wal_file < first_required_wal_file:
            # Everything older than this is irrelevant; the listing is
            # reverse-sorted so we can stop scanning entirely.
            self.log.info("WAL file %r is not needed for the latest basebackup", wal_file)
            break
        else:
            # WAL file in range first_required_wal_file .. current_wal_file
            archive_type = "WAL"

        if archive_type:
            resp = requests.head("{base}/archive/{file}".format(base=self.base_url, file=wal_file))
            if resp.status_code == 200:
                remote_hash = resp.headers.get("metadata-hash")
                hash_algorithm = resp.headers.get("metadata-hash-algorithm")
                check_hash = bool(
                    archive_type == "WAL" and (hash_checks_done < max_hash_checks or max_hash_checks < 0) and remote_hash
                )
                if archive_type == "WAL" and not remote_hash:
                    # If we don't have hashes available (old pghoard was running on previous master), re-upload first
                    # file that already exists in remote storage and doesn't have a checksum since it might be the last
                    # WAL file of previous timeline uploaded by old master and invalid because it doesn't have the
                    # timeline switch event and have some writes that are not valid for our timeline
                    existing_wal_without_checksum_count += 1
                    if existing_wal_without_checksum_count == 1:
                        self.log.info(
                            "%s file %r already archived but no hash is available, reuploading", archive_type, wal_file
                        )
                        need_archival.append(wal_file)
                        continue
                if check_hash:
                    hash_checks_done += 1
                    our_hash = self.calculate_hash(os.path.join(wal_dir, wal_file), hash_algorithm)
                    if not our_hash:
                        self.log.info(
                            "%s file %r already archived (file deleted before getting hash)", archive_type, wal_file
                        )
                    elif remote_hash.lower().strip() != our_hash.lower().strip():
                        self.log.warning(
                            "%s file %r already archived but existing hash %r differs from our hash %r, reuploading",
                            archive_type, wal_file, remote_hash, our_hash
                        )
                        need_archival.append(wal_file)
                    else:
                        self.log.info("%s file %r already archived and has valid hash", archive_type, wal_file)
                else:
                    self.log.info("%s file %r already archived", archive_type, wal_file)
                continue
            self.log.info("%s file %r needs to be archived", archive_type, wal_file)
            need_archival.append(wal_file)

    for wal_file in sorted(need_archival):  # sort oldest to newest
        resp = requests.put("{base}/archive/{file}".format(base=self.base_url, file=wal_file))
        archive_type = "TIMELINE" if ".history" in wal_file else "WAL"
        if resp.status_code != 201:
            self.log.error("%s file %r archival failed with status code %r", archive_type, wal_file, resp.status_code)
        else:
            self.log.info("%s file %r archived", archive_type, wal_file)
def check_wal_archive_integrity(self, new_backup_on_failure):
current_wal_file = self.get_current_wal_file()
first_required_wal_file, pg_version = self.get_first_required_wal_segment()
if not current_wal_file:
raise SyncError("Could not figure out current WAL segment")
if not first_required_wal_file:
|
br0r/systemet-api | server.py | Python | mit | 230 | 0.017391 | impo | rt os
from flask import jsonify

from systemetapi import app
from systemetapi import config


@app.route("/")
def index():
    """Health/status endpoint: report the API version and an 'ok' status."""
    return jsonify(version=config.VERSION, status='ok')


if __name__ == "__main__":
    # Development entry point only; use a WSGI server in production.
    app.run(debug=True)
|
TangentMicroServices/BuildService | buildservice/urls.py | Python | mit | 697 | 0.005739 | from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static

from api.views import router

# URL routing: admin site, DRF router at the root, UI app, DRF login views,
# and the swagger API explorer.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^', include(router.urls)),
    url(r'^ui/', include('ui.urls', namespace='build_ui')),
    url(r'^api-auth/',
        include('rest_framework.urls', namespace='rest_framework')),
    url(r'^explorer/',
        include('rest_framework_swagger.urls', namespace='swagger')),
]

# Setting up static files for development:
if settings.DEBUG is True:
    urlpatterns = urlpatterns + \
        static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
Insanityandme/dotfiles | vim/bundle/YouCompleteMe/python/ycm/youcompleteme.py | Python | unlicense | 25,635 | 0.030505 | # Copyright (C) 2011, 2012 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed | in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from future.utils import iteritems
import os
import vim
import json
import re
import signal
import base64
from subprocess import PIPE
from tempfile import NamedTemporaryFile
from ycm import paths, vimsupport
from ycmd import utils
from ycmd import server_utils
from ycmd.request_wrap import RequestWrap
from ycmd.responses import ServerError
from ycm.diagnostic_interface import DiagnosticInterface
from ycm.omni_completer import OmniCompleter
from ycm import syntax_parse
from ycm.client.ycmd_keepalive import YcmdKeepalive
from ycm.client.base_request import BaseRequest, BuildRequestData
from ycm.client.completer_available_request import SendCompleterAvailableRequest
from ycm.client.command_request import SendCommandRequest
from ycm.client.completion_request import ( CompletionRequest,
ConvertCompletionDataToVimData )
from ycm.client.omni_completion_request import OmniCompletionRequest
from ycm.client.event_notification import ( SendEventNotificationAsync,
EventNotification )
# UltiSnips integration is optional; degrade gracefully when the plugin is
# not installed.
try:
  from UltiSnips import UltiSnips_Manager
  USE_ULTISNIPS_DATA = True
except ImportError:
  USE_ULTISNIPS_DATA = False
def PatchNoProxy():
  """Append localhost addresses to the no_proxy environment variable.

  Ensures that HTTP requests to the locally-running ycmd server never go
  through a configured HTTP proxy.
  """
  additions = '127.0.0.1,localhost'
  existing = os.environ.get( 'no_proxy', '' )
  if existing:
    os.environ[ 'no_proxy' ] = existing + ',' + additions
  else:
    os.environ[ 'no_proxy' ] = additions
# We need this so that Requests doesn't end up using the local HTTP proxy when
# talking to ycmd. Users should actually be setting this themselves when
# configuring a proxy server on their machine, but most don't know they need to
# or how to do it, so we do it for them.
# Relevant issues:
#  https://github.com/Valloric/YouCompleteMe/issues/641
#  https://github.com/kennethreitz/requests/issues/879
PatchNoProxy()

# Force the Python interpreter embedded in Vim (in which we are running) to
# ignore the SIGINT signal. This helps reduce the fallout of a user pressing
# Ctrl-C in Vim.
signal.signal( signal.SIGINT, signal.SIG_IGN )

HMAC_SECRET_LENGTH = 16

# User-facing messages shown when the ycmd server process dies; which one is
# used depends on the server's exit status (see _NotifyUserIfServerCrashed).
SERVER_SHUTDOWN_MESSAGE = (
  "The ycmd server SHUT DOWN (restart with ':YcmRestartServer')." )
STDERR_FILE_MESSAGE = (
  "Run ':YcmToggleLogs stderr' to check the logs." )
STDERR_FILE_DELETED_MESSAGE = (
  "Logfile was deleted; set 'g:ycm_server_keep_logfiles' to see errors "
  "in the future." )
CORE_UNEXPECTED_MESSAGE = (
  'Unexpected error while loading the YCM core library.' )
CORE_MISSING_MESSAGE = (
  'YCM core library not detected; you need to compile YCM before using it. '
  'Follow the instructions in the documentation.' )
CORE_PYTHON2_MESSAGE = (
  "YCM core library compiled for Python 2 but loaded in Python 3. "
  "Set the 'g:ycm_server_python_interpreter' option to a Python 2 "
  "interpreter path." )
CORE_PYTHON3_MESSAGE = (
  "YCM core library compiled for Python 3 but loaded in Python 2. "
  "Set the 'g:ycm_server_python_interpreter' option to a Python 3 "
  "interpreter path." )
CORE_OUTDATED_MESSAGE = (
  'YCM core library too old; PLEASE RECOMPILE by running the install.py '
  'script. See the documentation for more details.' )

SERVER_IDLE_SUICIDE_SECONDS = 10800  # 3 hours
# Filetypes for which the diagnostic UI is enabled.
DIAGNOSTIC_UI_FILETYPES = set( [ 'cpp', 'cs', 'c', 'objc', 'objcpp' ] )
class YouCompleteMe( object ):
def __init__( self, user_options ):
  """Create the client state and start the ycmd server subprocess.

  :param user_options: dict of YCM user options; forwarded to the
      diagnostic interface, the omni completer and the server.
  """
  self._user_options = user_options
  self._user_notified_about_crash = False
  self._diag_interface = DiagnosticInterface( user_options )
  self._omnicomp = OmniCompleter( user_options )
  self._latest_file_parse_request = None
  self._latest_completion_request = None
  self._latest_diagnostics = []
  self._server_stdout = None
  self._server_stderr = None
  self._server_popen = None
  self._filetypes_with_keywords_loaded = set()
  self._ycmd_keepalive = YcmdKeepalive()
  self._SetupServer()
  # Periodically ping ycmd so it doesn't hit its idle-suicide timeout.
  self._ycmd_keepalive.Start()
  # Per-filetype hooks run after completion is accepted.
  self._complete_done_hooks = {
    'cs': lambda self: self._OnCompleteDone_Csharp()
  }
def _SetupServer( self ):
  """Spawn the ycmd server subprocess and point BaseRequest at it.

  The user options (plus a fresh HMAC secret) are written to a temp file
  that ycmd reads and deletes during its own startup.
  """
  self._available_completers = {}
  server_port = utils.GetUnusedLocalhostPort()
  # The temp options file is deleted by ycmd during startup
  with NamedTemporaryFile( delete = False, mode = 'w+' ) as options_file:
    hmac_secret = os.urandom( HMAC_SECRET_LENGTH )
    options_dict = dict( self._user_options )
    options_dict[ 'hmac_secret' ] = utils.ToUnicode(
      base64.b64encode( hmac_secret ) )
    json.dump( options_dict, options_file )
    options_file.flush()

    args = [ paths.PathToPythonInterpreter(),
             paths.PathToServerScript(),
             '--port={0}'.format( server_port ),
             '--options_file={0}'.format( options_file.name ),
             '--log={0}'.format( self._user_options[ 'server_log_level' ] ),
             '--idle_suicide_seconds={0}'.format(
                SERVER_IDLE_SUICIDE_SECONDS )]

    filename_format = os.path.join( utils.PathToCreatedTempDir(),
                                    'server_{port}_{std}.log' )

    self._server_stdout = filename_format.format( port = server_port,
                                                  std = 'stdout' )
    self._server_stderr = filename_format.format( port = server_port,
                                                  std = 'stderr' )
    args.append( '--stdout={0}'.format( self._server_stdout ) )
    args.append( '--stderr={0}'.format( self._server_stderr ) )

    if self._user_options[ 'server_keep_logfiles' ]:
      args.append( '--keep_logfiles' )

    self._server_popen = utils.SafePopen( args, stdin_windows = PIPE,
                                          stdout = PIPE, stderr = PIPE )
  # All subsequent requests are addressed/authenticated with these.
  BaseRequest.server_location = 'http://127.0.0.1:' + str( server_port )
  BaseRequest.hmac_secret = hmac_secret
  self._NotifyUserIfServerCrashed()
def IsServerAlive( self ):
returncode = self._server_popen.poll()
# When the process hasn't finished yet, poll() returns None.
return returncode is None
def _NotifyUserIfServerCrashed( self ):
  """Show a (one-time) Vim message explaining why ycmd shut down.

  The message is chosen from the server's exit status; the stderr logfile
  hint is only offered while the logfile still exists.
  """
  if self._user_notified_about_crash or self.IsServerAlive():
    return
  # Only nag the user once per server lifetime.
  self._user_notified_about_crash = True

  try:
    vimsupport.CheckFilename( self._server_stderr )
    stderr_message = STDERR_FILE_MESSAGE
  except RuntimeError:
    stderr_message = STDERR_FILE_DELETED_MESSAGE

  message = SERVER_SHUTDOWN_MESSAGE
  return_code = self._server_popen.poll()
  if return_code == server_utils.CORE_UNEXPECTED_STATUS:
    message += ' ' + CORE_UNEXPECTED_MESSAGE + ' ' + stderr_message
  elif return_code == server_utils.CORE_MISSING_STATUS:
    message += ' ' + CORE_MISSING_MESSAGE
  elif return_code == server_utils.CORE_PYTHON2_STATUS:
    message += ' ' + CORE_PYTHON2_MESSAGE
  elif return_code == server_utils.CORE_PYTHON3_STATUS:
    message += ' ' + CORE_PYTHON3_MESSAGE
  elif return_code == server_utils.CORE_OUTDATED_STATUS:
    message += ' ' + CORE_OUTDATED_MESSAGE
  else:
    message += ' ' + stderr_message
  vimsupport.PostVimMessage( message )
def ServerPid( self ):
if not self._server_popen:
return -1
return self._server_popen.pid
|
Babaritech/babar3 | back/babar_server/tests.py | Python | mit | 3,743 | 0 | import random
from decimal import Decimal, ROUND_HALF_UP
from django.test import TestCase
from django.core.validators import ValidationError
from .models import *
def setup():
    """
    Create dummy data

    Two statuses (Hero with no overdraft, Villain with 250), three
    customers, two products and one opening payment for the penguin.
    """
    Status.objects.create(
        name="Hero",
        overdraft="0"
    )
    Status.objects.create(
        name="Villain",
        overdraft="250"
    )
    Customer.objects.create(
        firstname="Bruce",
        lastname="Wayne",
        nickname="batman",
        email="batman@gotham.com",
        status=Status.objects.get(name="Hero")
    )
    Customer.objects.create(
        firstname="James",
        lastname="Gordon",
        nickname="jim",
        email="jim@gotham.com",
        status=Status.objects.get(name="Hero")
    )
    Customer.objects.create(
        firstname="Oswald",
        lastname="Cobblepot",
        nickname="penguin",
        email="penguin@gotham.com",
        status=Status.objects.get(name="Villain")
    )
    Product.objects.create(
        name="Shotgun",
        price="50.00"
    )
    Product.objects.create(
        name="Umbrella",
        price="5"
    )
    # Give the penguin a large opening balance for purchase tests.
    Payment.objects.create(
        customer=Customer.objects.get(nickname="penguin"),
        amount="1000"
    )
class CustomerTests(TestCase):
    def test_balance_calcul(self):
        """
        Test balance is sum of payments minus sum of purchases

        Performs a randomized sequence of purchases and payments for jim
        while tracking the expected balance locally, then compares it to
        the model's computed balance.
        """
        setup()
        amount = Decimal(200)
        Payment.objects.create(
            customer=Customer.objects.get(nickname="jim"),
            amount=amount
        )
        for i in range(25):
            if(random.choice((True, False))):
                # Buy one Umbrella (price 5).
                Purchase.objects.create(
                    customer=Customer.objects.get(nickname="jim"),
                    product=Product.objects.get(name="Umbrella")
                )
                amount -= 5
            else:
                # Pay in a random amount between 0.00 and 199.99.
                m = random.randrange(0, 20000) / 100
                Payment.objects.create(
                    customer=Customer.objects.get(nickname="jim"),
                    amount=m
                )
                amount += Decimal(m)
        self.assertEqual(
            Customer.objects.get(nickname="jim").balance,
            amount.quantize(Decimal('.001'), rounding=ROUND_HALF_UP)
        )
class PurchaseTests(TestCase):
    def test_purchase_auto_amount(self):
        """
        Test the amount field is automatically created
        """
        setup()
        p = Purchase.objects.create(
            customer=Customer.objects.get(nickname="penguin"),
            product=Product.objects.get(name="Umbrella")
        )
        # The purchase amount should mirror the product price (5).
        self.assertEqual(Purchase.objects.get(pk=p.pk).amount, 5)

    def test_purchase_no_money(self):
        """
        Test that a purchase can't be made without enough balance
        """
        setup()
        # Give batman slightly less than the Shotgun's price.
        Payment.objects.create(
            customer=Customer.objects.get(nickname="batman"),
            amount="49"
        )
        self.assertTrue(
            Customer.objects.get(nickname="batman").balance
            <
            Product.objects.get(name="Shotgun").price
        )
        p = Purchase(
            customer=Customer.objects.get(nickname="batman"),
            product=Product.objects.get(name="Shotgun")
        )
        # Model validation must reject the over-budget purchase.
        self.assertRaises(
            ValidationError,
            p.full_clean
        )
class PaymentTests(TestCase):
    def test_no_negative_payment(self):
        """
        Test that there can't be a negative payment
        """
        setup()
        p = Payment(
            customer=Customer.objects.get(nickname="penguin"),
            amount="-24"
        )
        # Model validation must reject the negative amount.
        self.assertRaises(
            ValidationError,
            p.full_clean
        )
|
bigdatafoundation/qngene | Export_gen_files/gentotsv.py | Python | apache-2.0 | 14,598 | 0.037608 | #!/usr/bin/python
#title :gentotsv.py
#description :Script to process impute files .gz and create a csv file
#author :Diego Alvarez
#date :2016-06-05
#python_version :3.5
#==============================================================================
import gzip
import os
import fnmatch
import csv
import sys
import getopt
import time
import linecache
import utils
import config
from multiprocessing import Pool, Process
import multiprocessing
from functools import partial
def script_usage():
print 'gentotsv.py -h<help> -t<threads> -s<sourcedir> -d<destinationdir> -f<samplefile>'
print '---------'
print 'If no parameters are passed, default values are taken from <config.py>'
print 'Default #threads = #processor cores'
print '----------------'
return
def get_gen_file_columns(p_source_dir, p_source_file):
    """Return the number of whitespace-separated columns in the first
    line of the gzipped GEN file p_source_dir + p_source_file.
    """
    with gzip.open(p_source_dir + p_source_file, 'rb') as gen_handle:
        utils.log(logger, "GEN file: " + p_source_file)
        first_line_fields = gen_handle.readline().split()
        column_count = len(first_line_fields)
        utils.log(logger, "Columns in GEN file: " + str(column_count))
        gen_handle.close()
    return column_count
def create_sample_file(p_source_dir, p_destination_dir, p_source_file, p_file_type):
    """Convert a SAMPLE file into a tab-separated SAM_* file and create
    one empty IND_* file per individual.

    The first two input lines are header/datatype rows and get negative
    sequence numbers (-1, -2); every later line is an individual and is
    numbered from 1. Returns the list of created IND_* file paths.
    """
    utils.log(logger, "Begin - create_sample_file -")
    line_count = 0
    base_name = utils.get_file_name(str(p_source_file))
    with open(p_destination_dir + "SAM_" + base_name + p_file_type, 'wb') as out_handle:
        utils.log(logger, "Reading file SAMPLE: " + p_source_file)
        writer = csv.writer(out_handle, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        with open(p_source_dir + p_source_file, 'rb') as sample_handle:
            ind_files = []
            for line in sample_handle:
                line_count = line_count + 1
                fields = line.split()
                if line_count <= 2:
                    # Header rows: sequence numbers -1 and -2.
                    writer.writerow(str(line_count * (-1)).split() + fields)
                else:
                    # Individuals are numbered starting at 1.
                    seq = str(line_count - 2).split()
                    writer.writerow(seq + fields)
                    # First two columns form the individual's identifier.
                    id_cols = fields[0:2]
                    ind_name = create_individuals_file(p_destination_dir, seq[0] + "_" + id_cols[0] + "_" + id_cols[1], p_file_type)
                    ind_files.append(ind_name)
            sample_handle.close()
        out_handle.close()
    utils.log(logger, "SAMPLE file lines: " + str(line_count))
    utils.log(logger, "End - create_sample_file -")
    return ind_files
def create_individuals_sample_files(p_source_dir, p_destination_dir, p_source_file, p_file_type):
    """Split a SAMPLE file into one transposed SAM_* TSV per individual,
    plus an empty IND_* file each; return the list of IND_* paths.

    Line 1 is the header row, line 2 the datatype row; each later line
    is one individual, written as (header, datatype, value) triples.

    Fix: the original also computed an unused local via
    utils.get_file_name(); that dead call is removed.
    """
    utils.log(logger, "Begin - create_individuals_sample_files -")
    line_count = 0
    ind_files = []
    with open(p_source_dir + p_source_file, 'rb') as sample_handle:
        header_fields = []
        datatype_fields = []
        for line in sample_handle:
            line_count = line_count + 1
            fields = line.split()
            if line_count == 1:
                header_fields = fields[:]
            elif line_count == 2:
                datatype_fields = fields[:]
            else:
                individual_seq = line_count - 2
                out_name = p_destination_dir + "SAM_" + str(individual_seq) + "_" + str(fields[0]) + "_" + str(fields[1]) + p_file_type
                with open(out_name, 'wb') as out_handle:
                    writer = csv.writer(out_handle, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                    # One row per column: header, datatype, value.
                    for idx in range(0, len(fields)):
                        writer.writerow([header_fields[idx]] + [datatype_fields[idx]] + [fields[idx]])
                    ind_name = create_individuals_file(p_destination_dir, str(individual_seq) + "_" + fields[0] + "_" + fields[1], p_file_type)
                    ind_files.append(ind_name)
                    out_handle.close()
        sample_handle.close()
    utils.log(logger, "SAMPLE file lines: " + str(line_count))
    utils.log(logger, "End - create_individuals_sample_files -")
    return ind_files
def create_snp_file(p_source_dir, p_destination_dir, p_source_file_type, p_dest_file_type):
    """Build a single SNP TSV by concatenating the first five columns of
    every gzipped GEN file (matching *p_source_file_type) in
    p_source_dir, prefixed with the source file name and a 1-based
    sequence number.

    Fix: restores the corrupted loop-variable token (``file_list``) and
    hoists the loop-invariant utils.get_file_name() call out of the
    per-line loop.
    """
    utils.log(logger, "Begin - Create SNP file -")
    filename = p_destination_dir + "SNP" + p_dest_file_type
    # Truncate/create the output so the appends below start from empty.
    open(filename, 'w').close()
    for file_list in sorted(os.listdir(p_source_dir)):
        if fnmatch.fnmatch(file_list, '*' + p_source_file_type):
            with gzip.open(p_source_dir + file_list, 'rb') as genfile:
                sequence = 0
                utils.log(logger, "Reading file GEN: " + file_list)
                source_file = utils.get_file_name(file_list)
                with open(filename, 'ab') as SNPfile:
                    csvwriter = csv.writer(SNPfile, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                    # Read line by line: readlines() would load the whole
                    # .gen file into memory, and many threads or very big
                    # files can cause memory overflow.
                    for line in genfile:
                        columns = line.split()
                        col05 = columns[0:5]
                        sequence = sequence + 1
                        seq = str(sequence).split()
                        csvwriter.writerow([source_file] + seq + col05)
                    SNPfile.close()
                genfile.close()
    utils.log(logger, "End - Create SNP file -")
    return
def create_individuals_file(p_destination_dir, p_filename, p_file_type):
    """Create (or truncate) an empty IND_<p_filename> file in
    p_destination_dir and return its full path.
    """
    ind_path = p_destination_dir + "IND_" + p_filename + p_file_type
    with open(ind_path, 'w'):
        pass
    return ind_path
def convert_cols_to_lines(p_source_dir,p_source_file,p_destination_dir,p_dest_file_list, p_individualsposlist, p_gen_column):
utils.log(logger,"Begin - convert_gen_cols_to_ind_lines - ")
positionindex = p_individualsposlist.index(p_gen_column)
regex = r"^{0}.*{1}$".format(p_destination_dir+"IND_"+str(positionindex+1)+"_",destination_file_type)
p_indfilename = utils.find_file_in_list(p_dest_file_list,regex)
source_file = utils.get_file_name(str(p_source_file))
try:
col = int(p_gen_column)
except:
e = sys.exc_info()[0]
utils.log(logger,e)
#Open individuals file
with open(p_indfilename,'a') as indfile:
utils.log(logger,"Writing IND .tsv file: "+ p_indfilename)
csvwriter = csv.writer(indfile,delimiter='\t',quotechar='"', quoting=csv.QUOTE_MINIMAL)
sequence = 0
with gzip.open(p_source_dir+p_source_file,'rb') as genfile:
for line in genfile: #reads line by line .gen file.
#readlines() loads full .gen file into memory and split in lines. To many threads
# or very big files can cause memory overflow.
#for line in genfile.readlines():
sequence=sequence+1
seq=str(sequence).split()
columns=line.split()
csvwriter.writerow([source_file]+seq+columns[col:col+3])
indfile.close()
utils.log(logger,"Lines in source file: "+ str(sequence))
genfile.close()
utils.log(logger,"End - convert_gen_cols_to_ind_lines - ")
retu |
batiste/django-page-cms | pages/testproj/documents/views.py | Python | bsd-3-clause | 577 | 0 | from django.shortcuts import render
from pages.testproj.documents.models import Document
def document_view(request, *args, **kwargs):
    """Render the example page template with document context.

    Adds to the view kwargs: ``documents`` (documents attached to the
    current page, if any), ``document`` (when a ``document_id`` kwarg is
    present) and the ``in_document_view`` flag.

    Fix: restores the splice-corrupted tokens (``document =`` and
    ``kwargs['document_id']``) that made the original invalid.
    """
    context = kwargs
    if kwargs.get('current_page', False):
        documents = Document.objects.filter(page=kwargs['current_page'])
        context['documents'] = documents
    if 'document_id' in kwargs:
        document = Document.objects.get(pk=int(kwargs['document_id']))
        context['document'] = document
    context['in_document_view'] = True
    return render(
        request, 'pages/examples/index.html',
        context)
|
AustinDKB/War-Card-Game-Simulator | war_sim.py | Python | gpl-2.0 | 4,472 | 0.003131 | import random
def war_handler():
    """Resolve a "war" between the module-level player_one and player_two
    hands, collecting the contested cards into the module-level ``temp``
    list and appending them to the winner's hand.

    The caller resets the function attribute ``war_handler.count``; it
    counts wars, including recursive ones when the face-off ties again.
    """
    war_handler.count += 1
    # Python 2 idiom: the print *statement* prints the result of
    # ("...").format(...).
    print("War is {0} is happening.").format(war_handler.count)
    # Decide how many cards each player commits, based on hand sizes.
    if len(player_one) > 4 and len(player_two) > 4:
        limiter = 3
    elif len(player_one) == 4 or len(player_two) == 4:
        limiter = 2
    else: # len(player_one) < 4 or len(player_two) < 4:
        if len(player_one) > len(player_two):
            limiter = len(player_two) - 2
        else: # len(player_one) < len(player_two)
            limiter = len(player_one) - 2
    w, x, y, z = 0, 0, 0, 0
    # Commit ``limiter`` cards from the top of each hand to the pot.
    for w in range(0, limiter):
        temp.append(player_one[w])
        temp.append(player_two[w])
    random.shuffle(temp)
    # NOTE(review): this always removes 5 cards from each hand
    # regardless of ``limiter`` — confirm that matches the intended
    # rules (a hand shorter than 5 would raise IndexError here).
    while x <= 4:
        player_one.remove(player_one[0])
        player_two.remove(player_two[0])
        x += 1
    if not player_one or not player_two:
        print("\n\n\n\n\n")
    else:
        # Face-off: compare the new top cards; winner takes the pot.
        if player_one[0] > player_two[0]:
            temp.append(player_one[0])
            temp.append(player_two[0])
            player_one.remove(player_one[0])
            player_two.remove(player_two[0])
            for y in range(0, len(temp)):
                player_one.append(temp[y])
        elif player_one[0] < player_two[0]:
            temp.append(player_one[0])
            temp.append(player_two[0])
            player_one.remove(player_one[0])
            player_two.remove(player_two[0])
            for z in range(0, len(temp)):
                player_two.append(temp[z])
        else: #player_one[0] == player_two[0]:
            war_handler()
# --- Interactive entry point: read both decks, simulate, report. ---
# Fix: restores the splice-corrupted tokens at the list comparison
# ("player_one > player_two") and the winner check ("not player_two")
# that made this script invalid.
print("##################################################")
print("# War Card game simulator! #")
print("# Written By: Austin Bakanec #")
print("# 99.97% Accurate #")
print("##################################################")
# Init important variables and get players names
player_one_name = raw_input("Whats Player one's name? ")
player_two_name = raw_input("Whats Player two's name? ")
player_one = []
player_two = []
temp = ""
i = 0
# Read each player's cards until the sentinel "END" is typed.
while i <= 1:
    # Get Player one's cards
    if i == 0:
        print("Input player {0}'s cards. Note: Capitals only!").format(player_one_name)
        while temp != "END":
            player_one.append(temp)
            temp = raw_input("Card: ")
        # Remove the initial blank entry from the array.
        if '' in player_one: player_one.remove('')
        i += 1
    # Get Player two's cards
    if i == 1:
        temp = ''
        print("Input player {0}'s cards. Note: Capitals only!").format(player_two_name)
        while temp != "END":
            player_two.append(temp)
            temp = raw_input("Card: ")
        # Remove the initial blank entry from the array.
        if '' in player_two: player_two.remove('')
        i += 1
# Map face cards to numbers (J=11, Q=12, K=13, A=14) for each deck.
translation_list = ['J', 'Q', 'K', 'A']
translation_list_1 = ['11', '12', '13', '14']
for i in range(0, len(translation_list)):
    player_one = [item.replace(translation_list[i], translation_list_1[i]) for item in player_one]
    player_two = [item.replace(translation_list[i], translation_list_1[i]) for item in player_two]
# Make the lists integers
player_one = [int(num) for num in player_one]
player_two = [int(num) for num in player_two]
total_wars = 0
# Play the game until one hand is empty.
while player_one and player_two:
    temp = []
    # Equal top cards with enough cards on both sides: go to war.
    if player_one[0] == player_two[0] and len(player_one) >= 4 and len(player_two) >= 4:
        war_handler.count = 0
        war_handler()
        total_wars += war_handler.count
    elif player_one[0] != player_two[0]:
        temp.append(player_one[0])
        temp.append(player_two[0])
        random.shuffle(temp)
        player_one.remove(player_one[0])
        player_two.remove(player_two[0])
        # NOTE(review): this compares the remaining *lists*
        # lexicographically, not the two drawn cards (already removed
        # above) — confirm this is the intended winner rule.
        if player_one > player_two:
            player_one.append(temp[0])
            player_one.append(temp[1])
        else: # player_one < player_two
            player_two.append(temp[0])
            player_two.append(temp[1])
    else:
        # NOTE(review): equal top cards with too few cards for a war
        # changes no state, so this loops forever — confirm intent.
        continue
if not player_one:
    print("Player {0} Wins!").format(player_two_name)
elif not player_two:
    print("Player {0} Wins!").format(player_one_name)
print("War Count: {0}").format(total_wars)
|
arnaudsj/titanium_mobile | support/android/builder.py | Python | apache-2.0 | 74,676 | 0.03076 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Appcelerator Titanium Mobile
# Copyright (c) 2011 by Appcelerator, Inc. All Rights Reserved.
# Licensed under the terms of the Apache Public License
# Please see the LICENSE included with this distribution for details.
#
# General builder script for staging, packaging, deploying,
# and debugging Titanium Mobile applications on Android
#
import os, sys, subprocess, shutil, time, signal, string, platform, re, glob, hashlib, imp, inspect
import run, avd, prereq, zipfile, tempfile, fnmatch, codecs, traceback, simplejson
from mako.template import Template
from os.path import splitext
from compiler import Compiler
from os.path import join, splitext, split, exists
from shutil import copyfile
from xml.dom.minidom import parseString
from tilogger import *
from datetime import datetime, timedelta
# Make the top-level support dir plus its common/ and module/
# subdirectories importable before pulling in the build helpers below.
# Fix: restores the splice-corrupted tokens (``os.path.join`` and
# ``from tiapp import *``) that made this module invalid.
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
top_support_dir = os.path.dirname(template_dir)
sys.path.append(top_support_dir)
sys.path.append(os.path.join(top_support_dir, 'common'))
sys.path.append(os.path.join(top_support_dir, 'module'))

from tiapp import *
from android import Android
from androidsdk import AndroidSDK
from deltafy import Deltafy, Delta
from css import csscompiler
from module import ModuleDetector
import localecompiler
import fastdev
# File and directory names skipped when copying project resources.
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store'];
ignoreDirs = ['.git','.svn','_svn', 'CVS'];
# AVD hardware defaults: enable camera and GPS in the emulator image.
android_avd_hw = {'hw.camera': 'yes', 'hw.gps':'yes'}
# res/ subfolder names excluded from resource processing.
res_skips = ['style']
# Module-wide TiLogger used by info()/debug()/... below; assigned
# elsewhere before logging is used (set here to None as a placeholder).
log = None
# Copied from frameworks/base/tools/aapt/Package.cpp
# Extensions aapt stores uncompressed in the APK (already-compressed
# media formats).
uncompressed_types = [
    ".jpg", ".jpeg", ".png", ".gif",
    ".wav", ".mp2", ".mp3", ".ogg", ".aac",
    ".mpg", ".mpeg", ".mid", ".midi", ".smf", ".jet",
    ".rtttl", ".imy", ".xmf", ".mp4", ".m4a",
    ".m4v", ".3gp", ".3gpp", ".3g2", ".3gpp2",
    ".amr", ".awb", ".wma", ".wmv"
]
# Fallback Android tool API level when tiapp.xml does not specify
# a 'tool-api-level' (see Builder.__init__).
MIN_API_LEVEL = 7
def render_template_with_tiapp(template_text, tiapp_obj):
    """Render *template_text* as a Mako template, exposing *tiapp_obj*
    to the template under the name ``tiapp``.
    """
    return Template(template_text).render(tiapp=tiapp_obj)
def remove_ignored_dirs(dirs):
    """Prune VCS directories (module-level ``ignoreDirs``) from *dirs*
    in place, so an ``os.walk`` using this list does not descend into
    them.

    Fix: the original removed items from ``dirs`` while iterating it,
    which skips the element following each removal; slice assignment
    rebuilds the list in place instead.
    """
    dirs[:] = [d for d in dirs if d not in ignoreDirs]
# ZipFile.extractall introduced in Python 2.6, so this is workaround for earlier
# versions
def zip_extractall(zfile, target_dir):
	"""Extract every non-empty entry of *zfile* under *target_dir*,
	creating parent directories as needed.

	NOTE(review): entry names are not validated against path traversal
	("zip slip"); a crafted archive containing "../" components could
	write outside target_dir. Archives here appear to come from the SDK
	itself, but confirm before using this on untrusted zips.
	"""
	file_infos = zfile.infolist()
	for info in file_infos:
		# Directory entries have file_size == 0; their paths are created
		# implicitly by the makedirs call below.
		if info.file_size > 0:
			file_path = os.path.join(target_dir, os.path.normpath(info.filename))
			parent_path = os.path.dirname(file_path)
			if not os.path.exists(parent_path):
				os.makedirs(parent_path)
			out_file = open(file_path, "wb")
			out_file.write(zfile.read(info.filename))
			out_file.close()
def dequote(s):
    """Strip a surrounding pair of double quotes from *s*.

    If *s* starts with '"', the first and last characters are dropped
    (the last is assumed to be the closing quote); otherwise *s* is
    returned unchanged.
    """
    if s.startswith('"'):
        return s[1:-1]
    return s
def pipe(args1, args2):
    """Run ``args1 | args2`` as a shell-style pipeline and return the
    second command's stdout.

    args1/args2 are argv lists (no shell involved). Fix: close our copy
    of the first process's stdout so it receives SIGPIPE if the second
    process exits early, per the subprocess docs' pipeline idiom.
    """
    p1 = subprocess.Popen(args1, stdout=subprocess.PIPE)
    p2 = subprocess.Popen(args2, stdin=p1.stdout, stdout=subprocess.PIPE)
    p1.stdout.close()
    return p2.communicate()[0]
def read_properties(propFile, separator=":= "):
    """Parse a Java-style properties stream into a dict.

    Blank lines and lines starting with '!' or '#' are ignored. Each
    remaining line is split at the first occurrence of any character in
    *separator*; the key is the text before the split (right-stripped)
    and the value is the remainder with separator characters stripped
    from its left edge. The stream is closed before returning.
    """
    parsed = dict()
    for raw_line in propFile:
        stripped = raw_line.strip()
        if not stripped:
            continue
        if stripped[0] in ('!', '#'):
            continue
        # Candidate split points: first hit of each separator char,
        # falling back to end-of-line when none is present.
        candidates = [stripped.find(ch) for ch in separator] + [len(stripped)]
        split_at = min(pos for pos in candidates if pos != -1)
        key = stripped[:split_at].rstrip()
        value = stripped[split_at:].lstrip(separator).rstrip()
        parsed[key] = value
    propFile.close()
    return parsed
# Thin wrappers over the module-level ``log`` (a TiLogger assigned
# elsewhere); they exist so call sites read as plain functions.
def info(msg):
	log.info(msg)
def debug(msg):
	log.debug(msg)
def warn(msg):
	log.warn(msg)
def trace(msg):
	log.trace(msg)
def error(msg):
	log.error(msg)
def copy_all(source_folder, dest_folder, ignore_dirs=(), ignore_files=(), ignore_exts=(), one_time_msg=""):
    """Recursively copy source_folder into dest_folder.

    ignore_dirs -- directory names to prune from the walk entirely
    ignore_files -- exact file names to skip
    ignore_exts -- file extensions (with dot) to skip
    one_time_msg -- if non-empty, logged via info() before the first copy

    Fixes: the original removed entries from the os.walk dirs list while
    iterating it (skipping the element after each removal) — slice
    assignment prunes correctly; the mutable-list default arguments are
    replaced with tuples (backward compatible for membership tests).
    """
    msg_shown = False
    for root, dirs, files in os.walk(source_folder):
        # Prune ignored directories in place so os.walk skips them.
        dirs[:] = [d for d in dirs if d not in ignore_dirs]
        for f in files:
            if f in ignore_files:
                continue
            if os.path.splitext(f)[1] in ignore_exts:
                continue
            if one_time_msg and not msg_shown:
                info(one_time_msg)
                msg_shown = True
            from_ = os.path.join(root, f)
            # Mirror the source-relative path under dest_folder.
            to_ = from_.replace(source_folder, dest_folder, 1)
            to_directory = os.path.split(to_)[0]
            if not os.path.exists(to_directory):
                os.makedirs(to_directory)
            shutil.copyfile(from_, to_)
def remove_orphaned_files(source_folder, target_folder):
    """Delete every file under target_folder whose relative path no
    longer exists under source_folder.

    When source_folder is a Resources directory, a file is also kept if
    its counterpart lives under source_folder/android/ (platform-specific
    resources).
    """
    is_res = source_folder.endswith('Resources') or source_folder.endswith('Resources' + os.sep)
    for root, dirs, files in os.walk(target_folder):
        for name in files:
            target_path = os.path.join(root, name)
            rel = target_path.replace(target_folder, '')
            if rel.startswith(os.sep):
                rel = rel[1:]
            orphan = not os.path.exists(os.path.join(source_folder, rel))
            # Resources may instead live under android/... (platform-specific).
            if orphan and is_res and os.path.exists(os.path.join(source_folder, 'android', rel)):
                orphan = False
            if orphan:
                os.remove(target_path)
def is_resource_drawable(path):
    """Return True if *path* contains an android/images/<qualifier>/
    segment (qualifier: high/medium/low or res-<something>)."""
    return re.search("android/images/(high|medium|low|res-[^/]+)/",
                     path.replace(os.sep, "/")) is not None


def resource_drawable_folder(path):
    """Map a resource image path to its Android res/ drawable folder
    name ('drawable-hdpi', 'drawable-mdpi', 'drawable-ldpi', or
    'drawable-<qualifier>' for res-<qualifier>); None if *path* is not a
    resource drawable.

    Fix: is_resource_drawable's pattern has no leading '/', so a path
    that *starts* with "android/images/..." passed the check but did not
    match the anchored pattern here, and the original then crashed with
    AttributeError on ``match.groups()``. A None match now returns None.
    """
    if not is_resource_drawable(path):
        return None
    pattern = r'/android/images/(high|medium|low|res-[^/]+)/'
    match = re.search(pattern, path.replace(os.sep, "/"))
    if match is None or not match.groups():
        return None
    folder = match.groups()[0]
    if re.match('high|medium|low', folder):
        # high -> hdpi, medium -> mdpi, low -> ldpi.
        return 'drawable-%sdpi' % folder[0]
    return 'drawable-%s' % folder.replace('res-', '')
class Builder(object):
	def __init__(self, name, sdk, project_dir, support_dir, app_id):
		"""Collect project paths/settings and make sure the build/android
		skeleton exists.

		name -- application name
		sdk -- path to the Android SDK root
		project_dir -- Titanium project root (directory holding tiapp.xml)
		support_dir -- android support/template directory
		app_id -- application id (java-package style)
		"""
		self.top_dir = project_dir
		self.project_tiappxml = os.path.join(self.top_dir,'tiapp.xml')
		self.project_dir = os.path.join(project_dir,'build','android')
		self.res_dir = os.path.join(self.project_dir,'res')
		self.platform_dir = os.path.join(project_dir, 'platform', 'android')
		self.project_src_dir = os.path.join(self.project_dir, 'src')
		self.project_gen_dir = os.path.join(self.project_dir, 'gen')
		self.name = name
		self.app_id = app_id
		self.support_dir = support_dir
		self.compiled_files = []
		self.force_rebuild = False
		# Debugger/fastdev are disabled until explicitly configured.
		self.debugger_host = None
		self.debugger_port = -1
		self.fastdev_port = -1
		self.fastdev = False
		# Prefer the tool API level declared in tiapp.xml, falling back
		# to the module-level MIN_API_LEVEL.
		temp_tiapp = TiAppXML(self.project_tiappxml)
		if temp_tiapp and temp_tiapp.android and 'tool-api-level' in temp_tiapp.android:
			self.tool_api_level = int(temp_tiapp.android['tool-api-level'])
		else:
			self.tool_api_level = MIN_API_LEVEL
		self.sdk = AndroidSDK(sdk, self.tool_api_level)
		self.tiappxml = temp_tiapp
		# Locate jarsigner/javac/java before any project generation.
		self.set_java_commands()
		# start in 1.4, you no longer need the build/android directory
		# if missing, we'll create it on the fly
		if not os.path.exists(self.project_dir) or not os.path.exists(os.path.join(self.project_dir,'AndroidManifest.xml')):
			android_creator = Android(name, app_id, self.sdk, None, self.java)
			parent_dir = os.path.dirname(self.top_dir)
			if os.path.exists(self.top_dir):
				android_creator.create(parent_dir, project_dir=self.top_dir, build_time=True)
			else:
				android_creator.create(parent_dir)
			# Skeleton was just (re)generated: force a full rebuild.
			self.force_rebuild = True
			sys.stdout.flush()
		# we place some files in the users home
		if platform.system() == "Windows":
			self.home_dir = os.path.join(os.environ['USERPROFILE'], '.titanium')
			self.android_home_dir = os.path.join(os.environ['USERPROFILE'], '.android')
		else:
			self.home_dir = os.path.join(os.path.expanduser('~'), '.titanium')
			self.android_home_dir = os.path.join(os.path.expanduser('~'), '.android')
		if not os.path.exists(self.home_dir):
			os.makedirs(self.home_dir)
		self.sdcard = os.path.join(self.home_dir,'android2.sdcard')
		self.classname = Android.strip_classname(self.name)
def set_java_commands(self):
self.jarsigner = "jarsigner"
self.javac = "javac"
self.java = "java"
if platform.system() == "Windows":
if os.environ.has_key("JAVA_HOME"):
home_jarsigner = os.path.join(os.environ["JAVA_HOME"], "bin", "jarsigner.exe")
home_javac = os.path.join(os.environ["JAVA_HOME"], "bin", "javac.exe")
home_java = os.path.join(os.environ["JAVA_HOME"], "bin", "java.exe")
found = True
# TODO Document this path and test properly under windows
if os.path.exists(home_jarsigner):
self.jarsigner = home_jarsigner |
cmdunkers/DeeperMind | Players/Connect4_RandomAI.py | Python | bsd-3-clause | 894 | 0.00783 | from Player import Player
import random
import numpy
import time
class Connect4_RandomAI(Player):
    """Connect 4 player that drops its disc into a uniformly random
    valid column.

    NOTE(review): the instance attribute ``self.value`` assigned in
    ``__init__`` shadows the ``value()`` method on instances, so
    ``instance.value()`` raises TypeError once constructed; callers
    appear to read the attribute directly. Confirm before renaming
    either one.

    Fix: restores the splice-corrupted tokens in the ``DetermineMove``
    signature and the ``__init__`` comment, and corrects the trailing
    end-of-class comment (it named Connect4_Human).
    """

    def __init__(self, value):
        # Seed the RNG with the current time so we don't see the exact
        # same AI behavior every time.
        random.seed(time.clock())
        self.value = value
    #end def init()

    def value(self):
        return self.value
    #end def value()

    def DetermineMove(self, board):
        """Return a numpy array [0, column] for a random valid drop."""
        # Keep generating random moves until we get one that is valid.
        while True:
            move = numpy.array([0, random.randint(0, board.width - 1)])
            if board.IsValidMove(move):
                break
        return move
    #end def DetermineMove()
#end class Connect4_RandomAI
|
langcog/wordbank | instruments/schemas/French_Quebecois_WG.py | Python | gpl-2.0 | 67,512 | 0.01182 | from django.db import models
from instruments.base import BaseTable
class French_Quebecois_WG(BaseTable):
item_1_choices = [('understands', 'understands'), ('produces', 'produces')]
item_1 = models.CharField(max_length=11, choices=item_1_choices, null=True)
item_2_choices = [('understands', 'understands'), ('produces', 'produces')]
item_2 = models.CharField(max_length=11, choices=item_2_choices, null=True)
item_3_choices = [('understands', 'understands'), ('produces', 'produces')]
item_3 = models.CharField(max_length=11, choices=item_3_choices, null=True)
item_4_choices = [('understands', 'understands'), ('produces', 'produces')]
item_4 = models.CharField(max_length=11, choices=item_4_choices, null=True)
item_5_choices = [('understands', 'understands'), ('produces', 'produces')]
item_5 = models.CharField(max_length=11, choices=item_5_choices, null=True)
item_6_choices = [('understands', 'understands'), ('produces', 'produces')]
item_6 = models.CharField(max_length=11, choices=item_6_choices, null=True)
item_7_choices = [('understands', 'understands'), ('produces', 'produces')]
item_7 = models.CharField(max_length=11, choices=item_7_choices, null=True)
item_8_choices = [('understands', 'understands'), ('produces', 'produces')]
item_8 = models.CharField(max_length=11, choices=item_8_choices, null=True)
item_9_choices = [('understands', 'understands'), ('produces', 'produces')]
item_9 = models.CharField(max_length=11, choices=item_9_choices, null=True)
item_10_choices = [('understands', 'understands'), ('produces', 'produces')]
item_10 = models.CharField(max_length=11, choices=item_10_choices, null=True)
item_11_choices = [('understands', 'understands'), ('produces', 'produces')]
item_11 = models.CharField(max_length=11, choices=item_11_choices, null=True)
item_12_choices = [('understands', 'understands'), ('produces', 'produces')]
item_12 = models.CharField(max_length=11, choices=item_12_choices, null=True)
item_13_choices = [('understands', 'understands'), ('produces', 'produces')]
item_13 = models.CharField(max_length=11, choices=item_13_choices, null=True)
item_14_choices = [('understands', 'understands'), ('produces', 'produces')]
item_14 = models.CharField(max_length=11, choices=item_14_choices, null=True)
item_15_choices = [('understands', 'understands'), ('produces', 'produces')]
item_15 = models.CharField(max_length=11, choices=item_15_choices, null=True)
item_16_choices = [('understands', 'understands'), ('produces', 'produces')]
item_16 = models.CharField(max_length=11, choices=item_16_choices, null=True)
item_17_choices = [('understands', 'understands'), ('produces', 'produces')]
item_17 = models.CharField(max_length=11, choices=item_17_choices, null=True)
item_18_choices = [('understands', 'understands'), ('produces', 'produces')]
item_18 = models.CharField(max_length=11, choices=item_18_choices, null=True)
item_19_choices = [('understands', 'understands'), ('produces', 'produces')]
item_19 = models.CharField(max_length=11, choices=item_19_choices, null=True)
item_20_choices = [('understands', 'understands'), ('produces', 'produces')]
item_20 = models.CharField(max_length=11, choices=item_20_choices, null=True)
item_21_choices = [('understands', 'understands'), ('produces', 'produces')]
item_21 = models.CharField(max_length=11, choices=item_21_choices, null=True)
item_22_choices = [('understands', 'understands'), ('produces', 'produces')]
item_22 = models.CharField(max_length=11, choices=item_22_choices, null=True)
item_23_choices = [('understands', 'understands'), ('produces', 'produces')]
item_23 = models.CharField(max_length=11, choices=item_23_choices, null=True)
item_24_choices = [('understands', 'understands'), ('produces', 'produces')]
item_24 = models.CharField(max_length=11, choices=item_24_choices, null=True)
item_25_choices = [('understands', 'understands'), ('produces', 'produces')]
item_25 = models.CharField(max_length=11, choices=item_25_choices, null=True)
item_26_choices = [('understands', 'understands'), ('produces', 'produces')]
item_26 = models.CharField(max_length=11, choices=item_26_choices, null=True)
item_27_choices = [('understands', 'understands'), ('produces', 'produces')]
item_27 = models.CharField(max_length=11, choices=item_27_choices, null=True)
item_28_choices = [('understands', 'understands'), ('produces', 'produces')]
item_28 = models.CharField(max_length=11, choices=item_28_choices, null=True)
item_29_choices = [('understands', 'understands'), ('produces', 'produces')]
item_29 = models.CharField(max_length=11, choices=item_29_choices, null=True)
item_30_choices = [('understands', 'understands'), ('produces', 'produces')]
item_30 = models.CharField(max_length= | 11, choices=item_30_choices, null=True)
item_31_choices = [('understands', 'understands'), ('produces', 'produces')]
item_31 = models.CharField(max_length=11, choices=item_31_choices, null=True)
item_32_choices = [('understands', 'understands'), ('produces', 'produces')]
item_32 = models.CharField(max_length=11, choices=it | em_32_choices, null=True)
item_33_choices = [('understands', 'understands'), ('produces', 'produces')]
item_33 = models.CharField(max_length=11, choices=item_33_choices, null=True)
item_34_choices = [('understands', 'understands'), ('produces', 'produces')]
item_34 = models.CharField(max_length=11, choices=item_34_choices, null=True)
item_35_choices = [('understands', 'understands'), ('produces', 'produces')]
item_35 = models.CharField(max_length=11, choices=item_35_choices, null=True)
item_36_choices = [('understands', 'understands'), ('produces', 'produces')]
item_36 = models.CharField(max_length=11, choices=item_36_choices, null=True)
item_37_choices = [('understands', 'understands'), ('produces', 'produces')]
item_37 = models.CharField(max_length=11, choices=item_37_choices, null=True)
item_38_choices = [('understands', 'understands'), ('produces', 'produces')]
item_38 = models.CharField(max_length=11, choices=item_38_choices, null=True)
item_39_choices = [('understands', 'understands'), ('produces', 'produces')]
item_39 = models.CharField(max_length=11, choices=item_39_choices, null=True)
item_40_choices = [('understands', 'understands'), ('produces', 'produces')]
item_40 = models.CharField(max_length=11, choices=item_40_choices, null=True)
item_41_choices = [('understands', 'understands'), ('produces', 'produces')]
item_41 = models.CharField(max_length=11, choices=item_41_choices, null=True)
item_42_choices = [('understands', 'understands'), ('produces', 'produces')]
item_42 = models.CharField(max_length=11, choices=item_42_choices, null=True)
item_43_choices = [('understands', 'understands'), ('produces', 'produces')]
item_43 = models.CharField(max_length=11, choices=item_43_choices, null=True)
item_44_choices = [('understands', 'understands'), ('produces', 'produces')]
item_44 = models.CharField(max_length=11, choices=item_44_choices, null=True)
item_45_choices = [('understands', 'understands'), ('produces', 'produces')]
item_45 = models.CharField(max_length=11, choices=item_45_choices, null=True)
item_46_choices = [('understands', 'understands'), ('produces', 'produces')]
item_46 = models.CharField(max_length=11, choices=item_46_choices, null=True)
item_47_choices = [('understands', 'understands'), ('produces', 'produces')]
item_47 = models.CharField(max_length=11, choices=item_47_choices, null=True)
item_48_choices = [('understands', 'understands'), ('produces', 'produces')]
item_48 = models.CharField(max_length=11, choices=item_48_choices, null=True)
item_49_choices = [('understands', 'understands'), ('produces', 'produces')]
item_49 = models.CharField(max_length=11, choices=item_49_choices, null=True)
item_50_choices = [('understands', 'understands'), ('produces', 'produces')]
item_50 = models.CharField( |
danielbair/aeneas | aeneas/extra/__init__.py | Python | agpl-3.0 | 1,124 | 0 | #!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
aeneas.extra contains a collection of extra tools for aeneas,
mainly custom TTS engine wrappers.
"""
|
calpeyser/google-cloud-python | logging/google/cloud/logging/entries.py | Python | apache-2.0 | 7,721 | 0 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Log entries within the Google Stackdriver Logging API."""
import json
import re
from google.protobuf import any_pb2
from google.protobuf.json_format import Parse
from google.cloud.logging.resource import Resource
from google.cloud._helpers import _name_from_project_path
from google.cloud._helpers import _rfc3339_nanos_to_datetime
_LOGGER_TEMPLATE = re.compile(r"""
projects/ # static prefix
(?P<project>[^/]+) # initial letter, wordchars + hyphen
/logs/ # static midfix
(?P<name>[^/]+) # initial letter, wordchars + allowed punc
""", re.VERBOSE)
def logger_name_from_path(path):
    """Extract the short logger name from a full logger resource path.

    :type path: str
    :param path: URI path of the form ``projects/<project>/logs/<name>``.

    :rtype: str
    :returns: The ``<name>`` component of ``path``.

    :raises: :class:`ValueError` if ``path`` does not match the
             expected template.
    """
    logger_name = _name_from_project_path(path, None, _LOGGER_TEMPLATE)
    return logger_name
class _BaseEntry(object):
"""Base class for TextEntry, StructEntry, ProtobufEntry.
:type payload: text or dict
:param payload: The payload passed as ``textPayload``, ``jsonPayload``,
or ``protoPayload``.
:type logger: :class:`google.cloud.logging.logger.Logger`
:param logger: the logger used to write the entry.
:type insert_id: text
:param insert_id: (optional) the ID used to identify an entry uniquely.
:type timestamp: :class:`datetime.datetime`
:param timestamp: (optional) timestamp for the entry
:type labels: dict
:param labels: (optional) mapping of labels for the entry
:type severity: str
:param severity: (optional) severity of event being logged.
:type http_request: dict
:param http_request: (optional) info about HTTP request associated with
the entry.
:type resource: :class:`~google.cloud.logging.resource.Resource`
:param resource: (Optional) Monitored resource of the entry
"""
def __init__(self, payload, logger, insert_id=None, timestamp=None,
labels=None, severity=None, http_request=None, resource=None):
self.payload = payload
self.logger = logger
self.insert_id = insert_id
self.timestamp = timestamp
self.labels = labels
self.severity = severity
self.http_request = http_request
self.resource = resource
@classmethod
def from_api_repr(cls, resource, client, loggers=None):
"""Factory: construct an entry given its API representation
:type resource: dict
:param resource: text entry resource representation returned from
the API
:type client: :class:`google.cloud.logging.client.Client`
:param client: Client which holds credentials and project
configuration.
:type loggers: dict
:param loggers:
(Optional) A mapping of logger fullnames -> loggers. If not
passed, the entry will have a newly-created logger.
:rtype: :class:`google.cloud.logging.entries._BaseEntry`
:returns: Text entry parsed from ``resource``.
"""
if loggers is None:
loggers = {}
logger_fullname = resource['logName']
logger = loggers.get(logger_fullname)
if logger is None:
logger_name = logger_name_from_path(logger_fullname)
logger = loggers[logger_fullname] = client.logger(logger_name)
payload = resource[cls._PAYLOAD_KEY]
insert_id = resource.get('insertId')
timestamp = resource.get('timestamp')
if timestamp is not None:
timestamp = _rfc3339_nanos_to_datetime(timestamp)
labe | ls = resource.get('labels')
severity = resource.get('severity')
http_request = resource.get('httpRequest')
monitored_resource_dict = resource.get('resource')
monitored_resource = None
if monitored_resource_dict is not None:
monitored_resour | ce = Resource._from_dict(monitored_resource_dict)
return cls(payload, logger, insert_id=insert_id, timestamp=timestamp,
labels=labels, severity=severity, http_request=http_request,
resource=monitored_resource)
class TextEntry(_BaseEntry):
    """Log entry whose payload is a plain string (``textPayload``).

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry
    """
    _PAYLOAD_KEY = 'textPayload'
class StructEntry(_BaseEntry):
    """Log entry whose payload is a JSON mapping (``jsonPayload``).

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry
    """
    _PAYLOAD_KEY = 'jsonPayload'
class ProtobufEntry(_BaseEntry):
    """Log entry whose payload arrived as ``protoPayload``.

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry

    :type payload: str, dict or any_pb2.Any
    :param payload: The payload passed as ``textPayload``, ``jsonPayload``,
                    or ``protoPayload``.  This also may be passed as a raw
                    :class:`.any_pb2.Any` if the ``protoPayload`` could
                    not be deserialized.

    :type logger: :class:`~google.cloud.logging.logger.Logger`
    :param logger: the logger used to write the entry.

    :type insert_id: str
    :param insert_id: (optional) the ID used to identify an entry uniquely.

    :type timestamp: :class:`datetime.datetime`
    :param timestamp: (optional) timestamp for the entry

    :type labels: dict
    :param labels: (optional) mapping of labels for the entry

    :type severity: str
    :param severity: (optional) severity of event being logged.

    :type http_request: dict
    :param http_request: (optional) info about HTTP request associated with
                         the entry

    :type resource: :class:`~google.cloud.logging.resource.Resource`
    :param resource: (Optional) Monitored resource of the entry
    """
    _PAYLOAD_KEY = 'protoPayload'

    def __init__(self, payload, logger, insert_id=None, timestamp=None,
                 labels=None, severity=None, http_request=None, resource=None):
        super(ProtobufEntry, self).__init__(
            payload, logger, insert_id=insert_id, timestamp=timestamp,
            labels=labels, severity=severity, http_request=http_request,
            resource=resource)
        # A raw any_pb2.Any payload could not be deserialized: keep it in
        # payload_pb and leave payload empty; otherwise payload_pb is None.
        if isinstance(self.payload, any_pb2.Any):
            self.payload, self.payload_pb = None, self.payload
        else:
            self.payload_pb = None

    def parse_message(self, message):
        """Parse payload into a protobuf message.

        Mutates the passed-in ``message`` in place.

        :type message: Protobuf message
        :param message: the message to be logged
        """
        # NOTE: This assumes that ``payload`` is already a deserialized
        #       ``Any`` field and ``message`` has come from an imported
        #       ``pb2`` module with the relevant protobuf message type.
        Parse(json.dumps(self.payload), message)
|
tempbottle/ironpython3 | Tests/modules/type_related/array_test.py | Python | apache-2.0 | 10,771 | 0.013555 | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
'''
Tests for CPython's array module.
'''
#--IMPORTS---------------------------------------------------------------------
from iptest.assert_util import *
skiptest("silverlight")
import array
#--GLOBALS---------------------------------------------------------------------
#--HELPERS---------------------------------------------------------------------
#--TEST CASES------------------------------------------------------------------
def test_ArrayType():
    '''array.ArrayType is documented as an alias of array.array.'''
    AreEqual(array.ArrayType,
             array.array)
# -- unimplemented placeholder tests ------------------------------------------
def test_array___add__():
    '''TODO: add coverage for array.array.__add__ (concatenation).'''
    pass

def test_array___class__():
    '''TODO: add coverage for array.array.__class__.'''
    pass

def test_array___contains__():
    '''TODO: add coverage for array.array.__contains__ (membership).'''
    pass
def test_array___copy__():
    '''__copy__ and __deepcopy__ must return distinct array objects.

    IronPython/Silverlight call __deepcopy__ without an argument (CodePlex
    issue 19200); CPython requires the memo argument.
    '''
    x = array.array('i', [1,2,3])
    y = x.__copy__()
    Assert(id(x) != id(y), "copy should copy")
    if is_cli or is_silverlight:
        #CodePlex 19200
        y = x.__deepcopy__()
    else:
        # NOTE(review): CPython expects a memo dict here; passing the array
        # itself appears to work but looks unintentional -- confirm.
        y = x.__deepcopy__(x)
    Assert(id(x) != id(y), "copy should copy")
# -- unimplemented placeholder tests ------------------------------------------
def test_array___deepcopy__():
    '''TODO: add coverage for array.array.__deepcopy__.'''
    pass

def test_array___delattr__():
    '''TODO: add coverage for array.array.__delattr__.'''
    pass

def test_array___delitem__():
    '''TODO: add coverage for array.array.__delitem__.'''
    pass

def test_array___delslice__():
    '''TODO: add coverage for array.array.__delslice__.'''
    pass

def test_array___doc__():
    '''TODO: add coverage for array.array.__doc__.'''
    pass

def test_array___eq__():
    '''TODO: add coverage for array.array.__eq__.'''
    pass

def test_array___format__():
    '''TODO: add coverage for array.array.__format__.'''
    pass

def test_array___ge__():
    '''TODO: add coverage for array.array.__ge__.'''
    pass

def test_array___getattribute__():
    '''TODO: add coverage for array.array.__getattribute__.'''
    pass

def test_array___getitem__():
    '''TODO: add coverage for array.array.__getitem__.'''
    pass

def test_array___getslice__():
    '''TODO: add coverage for array.array.__getslice__.'''
    pass

def test_array___gt__():
    '''TODO: add coverage for array.array.__gt__.'''
    pass

def test_array___hash__():
    '''TODO: add coverage for array.array.__hash__.'''
    pass

def test_array___iadd__():
    '''TODO: add coverage for array.array.__iadd__.'''
    pass

def test_array___imul__():
    '''TODO: add coverage for array.array.__imul__.'''
    pass
def test_array___init__():
    '''Constructor behaviors for several typecodes (TODO: revisit).'''
    #--I: unsigned ints round-trip across the 8/16/32-bit boundaries
    for x in [ 0, 1, 2,
               (2**8)-2, (2**8)-1, (2**8), (2**8)+1, (2**8)+2,
               (2**16)-2, (2**16)-1, (2**16), (2**16)+1, (2**16)+2,
               (2**32)-2, (2**32)-1,
               ]:
        temp_array1 = array.array('I', [x])
        AreEqual(temp_array1[0], x)
        temp_array1 = array.array('I', [x, x])
        AreEqual(temp_array1[0], x)
        AreEqual(temp_array1[1], x)
    # values >= 2**32 overflow the 'I' typecode
    for x in [ (2**32), (2**32)+1, (2**32)+2 ]:
        AssertError(OverflowError, array.array, 'I', [x])
    #--c: slice-assigning an array into itself duplicates its contents
    a = array.array('c', "stuff")
    a[1:0] = a
    b = array.array('c', "stuff"[:1] + "stuff" + "stuff"[1:])
    AreEqual(a, b)
    #--L: a 4-byte string initializes a single little-endian unsigned long
    a = array.array('L', "\x12\x34\x45\x67")
    AreEqual(1, len(a))
    AreEqual(1732588562, a[0])
    #--B: repetition with a (Python 2) long multiplier
    a = array.array('B', [0]) * 2L
    AreEqual(2, len(a))
    AreEqual("array('B', [0, 0])", str(a))
    #--b: a str initializer is taken as raw byte values
    AreEqual(array.array('b', 'foo'), array.array('b', [102, 111, 111]))
# -- unimplemented placeholder tests ------------------------------------------
def test_array___iter__():
    '''TODO: add coverage for array.array.__iter__.'''
    pass

def test_array___le__():
    '''TODO: add coverage for array.array.__le__.'''
    pass

def test_array___len__():
    '''TODO: add coverage for array.array.__len__.'''
    pass

def test_array___lt__():
    '''TODO: add coverage for array.array.__lt__.'''
    pass

def test_array___mul__():
    '''TODO: add coverage for array.array.__mul__.'''
    pass

def test_array___ne__():
    '''TODO: add coverage for array.array.__ne__.'''
    pass

def test_array___new__():
    '''TODO: add coverage for array.array.__new__.'''
    pass
def test_array___reduce__():
    '''Pickle support: __reduce__ returns (type, ctor-args, state).

    CPython reconstructs from a list of values, IronPython from the raw
    byte string (see http://ironpython.codeplex.com/workitem/28211).
    '''
    x = array.array('i', [1,2,3])
    if is_cpython: #http://ironpython.codeplex.com/workitem/28211
        AreEqual(repr(x.__reduce__()),
                 "(<type 'array.array'>, ('i', [1, 2, 3]), None)")
    else:
        AreEqual(repr(x.__reduce__()),
                 "(<type 'array.array'>, ('i', '\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'), None)")
def test_array___reduce_ex__():
    '''__reduce_ex__ (any protocol) mirrors __reduce__ (TODO: revisit).

    Same CPython/IronPython representation split as test_array___reduce__.
    '''
    x = array.array('i', [1,2,3])
    if is_cpython: #http://ironpython.codeplex.com/workitem/28211
        AreEqual(repr(x.__reduce_ex__(1)),
                 "(<type 'array.array'>, ('i', [1, 2, 3]), None)")
        AreEqual(repr(x.__reduce_ex__()),
                 "(<type 'array.array'>, ('i', [1, 2, 3]), None)")
    else:
        AreEqual(repr(x.__reduce_ex__(1)),
                 "(<type 'array.array'>, ('i', '\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'), None)")
        AreEqual(repr(x.__reduce_ex__()),
                 "(<type 'array.array'>, ('i', '\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'), None)")
# -- unimplemented placeholder tests ------------------------------------------
def test_array___repr__():
    '''TODO: add coverage for array.array.__repr__.'''
    pass

def test_array___rmul__():
    '''TODO: add coverage for array.array.__rmul__.'''
    pass

def test_array___setattr__():
    '''TODO: add coverage for array.array.__setattr__.'''
    pass

def test_array___setitem__():
    '''TODO: add coverage for array.array.__setitem__.'''
    pass

def test_array___setslice__():
    '''TODO: add coverage for array.array.__setslice__.'''
    pass

def test_array___sizeof__():
    '''TODO: add coverage for array.array.__sizeof__.'''
    pass

def test_array___str__():
    '''TODO: add coverage for array.array.__str__.'''
    pass

def test_array___subclasshook__():
    '''TODO: add coverage for array.array.__subclasshook__.'''
    pass

def test_array_append():
    '''TODO: add coverage for array.array.append.'''
    pass

def test_array_buffer_info():
    '''TODO: add coverage for array.array.buffer_info.'''
    pass

def test_array_byteswap():
    '''TODO: add coverage for array.array.byteswap.'''
    pass

def test_array_count():
    '''TODO: add coverage for array.array.count.'''
    pass

def test_array_extend():
    '''TODO: add coverage for array.array.extend.'''
    pass

def test_array_fromfile():
    '''TODO: add coverage for array.array.fromfile.'''
    pass

def test_array_fromlist():
    '''TODO: add coverage for array.array.fromlist.'''
    pass

def test_array_fromstring():
    '''TODO: add coverage for array.array.fromstring.'''
    pass

def test_array_fromunicode():
    '''TODO: add coverage for array.array.fromunicode.'''
    pass

def test_array_index():
    '''TODO: add coverage for array.array.index.'''
    pass

def test_array_insert():
    '''TODO: add coverage for array.array.insert.'''
    pass

def test_array_itemsize():
    '''TODO: add coverage for array.array.itemsize.'''
    pass

def test_array_pop():
    '''TODO: add coverage for array.array.pop.'''
    pass

def test_array_read():
    '''TODO: add coverage for array.array reading from a file.'''
    pass

def test_array_remove():
    '''TODO: add coverage for array.array.remove.'''
    pass

def test_array_reverse():
    '''TODO: add coverage for array.array.reverse.'''
    pass

def test_array_tofile():
    '''TODO: add coverage for array.array.tofile.'''
    pass

def test_array_tolist():
    '''TODO: add coverage for array.array.tolist.'''
    pass
def test_array_tostring():
    '''tostring() of a 'u' (unicode) array yields its UTF-16-LE bytes.'''
    import array
    AreEqual(array.array('u', u'abc').tostring(), 'a\x00b\x00c\x00')

def test_array_tounicode():
    '''TODO: add coverage for array.array.tounicode.'''
    pass

def test_array_typecode():
    '''The typecode attribute is exposed as a str (TODO: revisit).'''
    x = array.array('i')
    AreEqual(type(x.typecode), str)

def test_array_write():
    '''TODO: add coverage for array.array file writing.'''
    pass
def test_cp9348():
test_cases = { ('c', "a") : "array('c', 'a')",
('b', "a") : "array('b', [97])",
('B', "a") : "array('B', [97])",
('u', u"a") : "array('u', u'a')",
('h', "\x12\x34") : "array('h', [13330])",
('H', "\x12\x34") : "array('H', [13330])",
('i', "\x12\x34\x45\x67") : "array('i', [1732588562])",
('I', "\x12\x34\x45\x67") : "array('I', [1732588562L])",
('I', "\x01\x00\x00\x00") : "array('I', [1L])",
('l', "\x12\x34\x45\x67") : "array('l', [1732588562])",
('L', "\x12\x34\x45\x67") : "array('L', [1732588562L])",
}
if is_cpython: #http://ironpython.codeplex.com/workitem/28 |
edisonlz/fruit | web_project/base/site-packages/gdata/tlslite/utils/OpenSSL_TripleDES.py | Python | apache-2.0 | 1,666 | 0.004802 | """OpenSSL/M2Crypto 3DES implementation."""
from cryptomath import *
from TripleDES import *
if m2cryptoLoaded:

    def new(key, mode, IV):
        """Factory returning an M2Crypto-backed 3DES cipher object."""
        return OpenSSL_TripleDES(key, mode, IV)

    class OpenSSL_TripleDES(TripleDES):
        """TripleDES (des-ede3-cbc) implementation delegating to M2Crypto."""

        def __init__(self, key, mode, IV):
            TripleDES.__init__(self, key, mode, IV, "openssl")
            self.key = key
            self.IV = IV

        def _createContext(self, encrypt):
            """Build a fresh EVP cipher context; encrypt=1 for encryption, 0 for decryption."""
            context = m2.cipher_ctx_new()
            cipherType = m2.des_ede3_cbc()
            m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
            return context

        def encrypt(self, plaintext):
            TripleDES.encrypt(self, plaintext)
            context = self._createContext(1)
            ciphertext = m2.cipher_update(context, plaintext)
            m2.cipher_ctx_free(context)
            # CBC chaining: the next call continues from the last cipher block.
            self.IV = ciphertext[-self.block_size:]
            return ciphertext

        def decrypt(self, ciphertext):
            TripleDES.decrypt(self, ciphertext)
            context = self._createContext(0)
            #I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.
            #To work around this, we append sixteen zeros to the string, below:
            plaintext = m2.cipher_update(context, ciphertext+('\0'*16))
            #If this bug is ever fixed, then plaintext will end up having a garbage
            #plaintext block on the end.  That's okay - the below code will ignore it.
            plaintext = plaintext[:len(ciphertext)]
            m2.cipher_ctx_free(context)
            self.IV = ciphertext[-self.block_size:]
            return plaintext
yoosw/printrun_etri | printrun/gui/control_printing.py | Python | gpl-3.0 | 6,280 | 0.007643 | import wx
import os
from printrun.utils import imagefile
def Print_Control(root, parentpanel):
    """Build the print-control screen widgets onto *parentpanel*.

    All widgets are attached as attributes of *root* so that root's event
    handlers can reach them.  Positions are absolute pixel coordinates;
    Windows ("nt") uses slightly different offsets/parent panels.
    """
    # Currently-loading file name label.
    if os.name == "nt":
        root.text_loading_file = wx.StaticText(parentpanel, label=str(root.var_loading_file_name), pos=(75, 33))
    else:
        root.text_loading_file = wx.StaticText(parentpanel, label=str(root.var_loading_file_name), pos=(75, 35))
    root.text_loading_file.SetFont(root.font_base)
    root.text_loading_file.SetBackgroundColour(wx.WHITE)
    #======================= gauge start
    # NOTE(review): this timer is immediately replaced by wx.Timer(root)
    # a few lines below -- confirm the first assignment is still needed.
    root.timer = wx.Timer(parentpanel, 1)
    root.count = 0
    root.gauge = wx.Gauge(parentpanel, range=99, pos=(56, 106), size=(470, 26))
    root.Bind(wx.EVT_TIMER, root.TimerHandler)
    root.timer = wx.Timer(root)
    root.timer.Start(100)
    # gauge percentage label
    root.text_gauge = wx.StaticText(parentpanel, label=str(root.var_loading_count) + "%", pos=(550, 102))
    root.text_gauge.SetFont(root.font_gauge)
    #======================= gauge end
    # elapsed print time
    bmp_print_home = wx.Bitmap(imagefile("flexor/print_time.png"), wx.BITMAP_TYPE_ANY)
    wx.StaticBitmap(parentpanel, -1, bmp_print_home, (405, 162))
    root.text_print_time = wx.StaticText(parentpanel, label='00:00:00', pos=(510, 158))
    root.text_print_time.SetFont(root.font_base)
    # load the button/label bitmaps
    bmp_print_list = wx.Bitmap(imagefile("flexor/print_list.png"), wx.BITMAP_TYPE_PNG)
    bmp_print_nozzle_temp1 = wx.Bitmap(imagefile("flexor/print_nozzle_temp1.png"), wx.BITMAP_TYPE_PNG)
    bmp_print_nozzle_temp2 = wx.Bitmap(imagefile("flexor/print_nozzle_temp2.png"), wx.BITMAP_TYPE_PNG)
    bmp_print_output_speed = wx.Bitmap(imagefile("flexor/print_output_speed.png"), wx.BITMAP_TYPE_PNG)
    bmp_print_fan_speed = wx.Bitmap(imagefile("flexor/print_fan_speed.png"), wx.BITMAP_TYPE_PNG)
    root.bmp_print_start = wx.Bitmap(imagefile("flexor/print_start.png"), wx.BITMAP_TYPE_PNG)
    bmp_print_stop = wx.Bitmap(imagefile("flexor/print_stop.png"), wx.BITMAP_TYPE_PNG)
    root.bmp_print_pause = wx.Bitmap(imagefile("flexor/print_pause.png"), wx.BITMAP_TYPE_PNG)
    root.bmp_print_resume = wx.Bitmap(imagefile("flexor/print_resume.png"), wx.BITMAP_TYPE_PNG)
    bmp_print_filament_ch = wx.Bitmap(imagefile("flexor/print_filamentch.png"), wx.BITMAP_TYPE_PNG)
    bmp_print_emergency = wx.Bitmap(imagefile("flexor/print_emergency.png"), wx.BITMAP_TYPE_PNG)
    # nozzle 1: set temperature (blue) and current temperature (red)
    wx.StaticBitmap(parentpanel, -1, bmp_print_nozzle_temp1, (34, 233))
    root.text_print_nozzle_temp1_set = wx.StaticText(parentpanel, label=("0" + u"\u00B0C"), pos=(111, 247))
    root.text_print_nozzle_temp1_on = wx.StaticText(parentpanel, label=("0" + u"\u00B0C"), pos=(111, 281))
    root.text_print_nozzle_temp1_set.SetFont(root.font_base)
    root.text_print_nozzle_temp1_on.SetFont(root.font_base)
    root.text_print_nozzle_temp1_set.SetForegroundColour("#3399FF") # set text color
    root.text_print_nozzle_temp1_on.SetForegroundColour("#FF3300")
    # nozzle 2: same pair of labels
    wx.StaticBitmap(parentpanel, -1, bmp_print_nozzle_temp2, (230, 233))
    root.text_print_nozzle_temp2_set = wx.StaticText(parentpanel, label=("0" + u"\u00B0C"), pos=(305, 247))
    root.text_print_nozzle_temp2_on = wx.StaticText(parentpanel, label=("0" + u"\u00B0C"), pos=(305, 281))
    root.text_print_nozzle_temp2_set.SetFont(root.font_base)
    root.text_print_nozzle_temp2_on.SetFont(root.font_base)
    root.text_print_nozzle_temp2_set.SetForegroundColour("#3399FF") # set text color
    root.text_print_nozzle_temp2_on.SetForegroundColour("#FF3300")
    # print speed / fan speed icons
    wx.StaticBitmap(parentpanel, -1, bmp_print_output_speed, (26, 370))
    wx.StaticBitmap(parentpanel, -1, bmp_print_fan_speed, (223, 358))
    # swyoo 2015.09.15 for combobox select
    #======================= combobox start
    root.speed_values = ['50', '60', '70', '80', '90', '100', '110', '120', '130', '140', '150']
    select_speed_val = ['50%', '60%', '70%', '80%', '90%', '100%', '110%', '120%', '130%', '140%', '150%']
    root.pan_values = ['0', '25', '51', '76', '102', '127', '153', '178', '204', '229', '255']
    select_pan_val = ['OFF', '10%', '20%', '30%', '40%', '50%', '60%', '70%', '80%', '90%', '100%']
    if os.name == "nt":
        root.speed_combo = wx.ComboBox(root.bitmap1, -1, value="100%", pos=(123, 380), size=(80, -1), choices=select_speed_val, style=wx.CB_READONLY)
    else:
        root.speed_combo = wx.ComboBox(parentpanel, -1, value="100%", pos=(123, 380), size=(80, 40), choices=select_speed_val, style=wx.CB_READONLY)
    root.Bind(wx.EVT_COMBOBOX, root.On_Speed_Select, id = root.speed_combo.GetId())
    root.speed_combo.SetFont(root.font_combo)
    if os.name == "nt":
        root.pan_combo = wx.ComboBox(root.bitmap1, -1, value="100%", pos=(310, 380), size=(80, -1), choices=select_pan_val, style=wx.CB_READONLY)
    else:
        root.pan_combo = wx.ComboBox(parentpanel, -1, value="100%", pos=(310, 380), size=(80, 40), choices=select_pan_val, style=wx.CB_READONLY)
    root.Bind(wx.EVT_COMBOBOX, root.On_Pan_Select, id = root.pan_combo.GetId())
    root.pan_combo.SetFont(root.font_combo)
    #======================= combobox end
    # action buttons: list/start/stop/filament-change/emergency
    if os.name == "nt":
        dis_panel = root.bitmap1
    else:
        dis_panel = parentpanel
    if os.name == "nt":
        root.btn_bmp_print_list = wx.BitmapButton(dis_panel, -1, bmp_print_list, (34, 34), style=wx.NO_BORDER)
    else:
        root.btn_bmp_print_list = wx.BitmapButton(dis_panel, -1, bmp_print_list, (30, 30), style=wx.NO_BORDER)
    root.btn_bmp_print_list.Bind(wx.EVT_BUTTON, root.loadfile)
    root.btn_bmp_print_start = wx.BitmapButton(dis_panel, -1, root.bmp_print_start, (425, 234), style=wx.NO_BORDER)
    root.btn_bmp_print_start.Bind(wx.EVT_BUTTON, root.printfile)
    btn_bmp_print_stop = wx.BitmapButton(dis_panel, -1, bmp_print_stop, (563, 234), style=wx.NO_BORDER)
    btn_bmp_print_stop.Bind(wx.EVT_BUTTON, root.off)
    btn_bmp_print_filament_ch = wx.BitmapButton(dis_panel, -1, bmp_print_filament_ch, (428, 351), style=wx.NO_BORDER)
    btn_bmp_print_filament_ch.Bind(wx.EVT_BUTTON, root.On_Filament_Change)
    btn_bmp_print_emergency = wx.BitmapButton(dis_panel, -1, bmp_print_emergency, (561, 351), style=wx.NO_BORDER)
    btn_bmp_print_emergency.Bind(wx.EVT_BUTTON, root.reset)
    return
|
gappleto97/Senior-Project | common/bounty.py | Python | mit | 14,300 | 0.002587 | import os, pickle, re, sys, rsa
from common.safeprint import safeprint
from common.call import parse
from multiprocessing import Lock
from hashlib import sha256
global bountyList
global bountyLock
global bountyPath
global masterKey
bountyList = []
bountyLock = Lock()
bounty_path = "data" + os.sep + "bounties.pickle"
masterKey = rsa.PublicKey(*pickle.load(open("master_public_key.pickle", "rb")))
def getUTC():
    """Return the current UTC time as whole seconds since the Unix epoch."""
    import calendar
    import time
    return calendar.timegm(time.gmtime())
class Bounty(object):
    """An object representation of a Bounty

    Parts:
        ip      -- The ip address of the requesting node
        btc     -- The Bitcoin address of the requesting party
        reward  -- The reward amount in satoshis to be given over 24 hours
                   (x == 0 or 1440 <= x <= 100000000) (1440 is 1 satoshi/min)
        ident   -- A value set by the issuer to help manage its related files
        timeout -- Unix time at which the bounty expires (defaults to 24 hours)
        data    -- A dictionary containing optional, additional information
            author  -- String which represents the group providing the Bounty
            reqs    -- Dict containing requirements keyed by the related python call
                       ("sys.platform":"win32")
            perms   -- Dict containing the minimum required security policies
                       (if empty, most restrictive assumed)
            key     -- A tuple which contains the RSA n and e values for this Bounty
                       (required only when reward is 0)
            sig     -- A Bytes object of str(Bounty) signed by the above key
                       (required only when reward is 0)
            TDL     -- More to be defined in later versions
    """

    def __repr__(self):
        """Deterministic string form of the bounty; also the signing payload."""
        output = "<Bounty: ip=" + str(self.ip) + ", btc=" + str(self.btc) + ", reward=" + str(self.reward)
        output = output + ", id=" + str(self.ident) + ", timeout=" + str(self.timeout)
        output = output + ", author=" + str(self.data.get('author'))
        if self.data.get('reqs') != {} and isinstance(self.data.get('reqs'), dict):
            output = output + ", reqs=" + str(sorted(self.data.get('reqs').items(), key=lambda x: x[0]))
        if self.data.get('perms') != {} and isinstance(self.data.get('perms'), dict):
            output = output + ", perms=" + str(sorted(self.data.get('perms').items(), key=lambda x: x[0]))
        return output + ">"

    def __eq__(self, other):
        """Equal when reward, ident and data all match.

        NOTE(review): on attribute errors this falls back to ``other is not
        None`` -- confirm that is the intended degenerate comparison.
        """
        try:
            return (self.reward == other.reward) and (self.ident == other.ident) and (self.data == other.data)
        except:
            return other is not None

    def __ne__(self, other):
        """Negation of __eq__ (with a None fallback on errors)."""
        try:
            return not self.__eq__(other)
        except:
            return other is None

    def __lt__(self, other):
        """Lower priority: smaller reward, then earlier timeout.

        NOTE(review): this ordering is not a total order consistent with
        __gt__ (a bounty with a larger reward but smaller timeout compares
        both < and >) -- confirm before sorting with it.
        """
        try:
            if self.reward < other.reward:
                return True
            elif self.timeout < other.timeout:
                return True
            else:
                return False
        except:
            return other is not None

    def __gt__(self, other):
        """Higher priority: larger reward, then later timeout."""
        try:
            if self.reward > other.reward:
                return True
            elif self.timeout > other.timeout:
                return True
            else:
                return False
        except:
            return other is None

    def __le__(self, other):
        """Lower priority or equal."""
        boolean = self.__lt__(other)
        if boolean:
            return boolean
        else:
            return self.__eq__(other)

    def __ge__(self, other):
        """Higher priority or equal."""
        boolean = self.__gt__(other)
        if boolean:
            return boolean
        else:
            return self.__eq__(other)

    def __hash__(self):
        # NOTE(review): the hash keys on __repr__ which includes ip/btc,
        # while __eq__ ignores them, so equal bounties can hash unequally.
        return hash((self.__repr__(), str(self.data)))

    def __init__(self, ipAddress, btcAddress, rewardAmount, **kargs):
        """Initialize a Bounty; constructor.

        Optional kargs: timeout (unix time), ident, dataDict (merged into
        self.data), keypair (an rsa.PrivateKey used to sign immediately).
        """
        self.ip = ipAddress
        self.btc = btcAddress
        self.reward = rewardAmount
        self.ident = ''
        if kargs.get('timeout') is not None:
            self.timeout = kargs.get('timeout')
        else:
            self.timeout = getUTC() + 86400
        self.data = {'author': '',
                     'reqs': {},
                     'perms': {}}
        if kargs.get('ident') is not None:
            self.ident = kargs.get('ident')
        if kargs.get('dataDict') is not None:
            self.data.update(kargs.get('dataDict'))
        if kargs.get('keypair') is not None:
            self.sign(kargs.get('keypair'))

    def isValid(self):
        """Internal method which checks the Bounty as valid in the most minimal version

        ip      -- Must be in valid range
        btc     -- Must be in valid namespace
        reward  -- Must be 0 (with a valid signature) or in 1440..100000000
        timeout -- Must be greater than the current time
        """
        try:
            safeprint("Testing IP address", verbosity=1)
            if not checkIPAddressValid(self.ip):
                return False
            safeprint("Testing Bitcoin address", verbosity=1)
            # The following is a soft check
            # A deeper check will be needed in order to assure this is correct
            if not checkBTCAddressValid(self.btc):
                return False
            safeprint("Testing reward and/or signiture validity", verbosity=1)
            # A zero reward is allowed only when the attached signature is
            # valid; otherwise the reward must be inside the allowed band.
            # (The previous membership test on range() rejected reward == 0
            # outright, inverted the signature check, and on Python 2 built
            # a 100-million-element list per call.)
            if self.reward:
                if not 1440 <= self.reward <= 100000000:
                    return False
            elif not self.checkSign():
                return False
            safeprint("Testing timeout", verbosity=1)
            if self.timeout < getUTC():  # check against current UTC
                return False
            safeprint("Testing bounty requirements", verbosity=1)
            if parse(self.data.get('reqs')):
                return 1
            return -1
        except:
            return False

    def isPayable(self, factor):
        """check if address has enough"""
        return True  # later make this a wrapper for pywallet.balance()

    def checkSign(self):
        """Check if the signature attached to the Bounty is valid.

        The bounty's key must itself be certified by the master key.
        """
        try:
            from rsa import verify, PublicKey
            if self.data.get('cert'):  # where key = (PublicKey.n, PublicKey.e)
                expected = str(self).encode('utf-8')
                data = self.data
                n = data.get('key')[0]
                e = data.get('key')[1]
                if rsa.verify(str((n, e)).encode('utf-8'), data.get('cert'), masterKey):
                    return verify(expected, data.get('sig'), PublicKey(n, e))
            return False
        except:
            # Any malformed or missing field means the signature is invalid.
            # (This previously also referenced an undefined name 'keyList',
            # which made every call raise and return False.)
            return False

    def sign(self, privateKey):  # privateKey: an rsa.PrivateKey instance
        """Sign a bounty and attach the key value."""
        try:
            from rsa import sign
            expected = str(self).encode('utf-8')
            self.data.update({'key': (privateKey.n, privateKey.e),
                              'sig': sign(expected, privateKey, 'SHA-256')})
        except:
            return False
def checkBTCAddressValid(address):
"""Check to see if a Bitcoin address is within the valid namespace. Will potentially give false positives based on leading 1s"""
if not re.match(re.compile("^[a-km-zA-HJ-Z1-9]{26,35}$"), address):
return False
decimal = 0
for char in address:
decimal = decimal * 58 + '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'.index(char)
bcbytes = ""
if sys.version_info[0] < 3:
"""long does not have a to_bytes() in versions less than 3. This is an equivalent function"""
bcbytes = (('%%0%dx' % (25 << 1) % decimal).decode('hex')[-25:])
else:
bcbytes = decimal.to_bytes(25, 'b |
pennyan/Smtlink | z3_interface/RewriteExpt.py | Python | bsd-3-clause | 15,337 | 0.006194 | # Copyright (C) 2015, University of British Columbia
# Written (originally) by Mark Greenstreet (13th March, 2014)
# Editted by Yan Peng (11th Nov. 2016)
#
# License: A 3-clause BSD license.
# See the LICENSE file distributed with this software
import collections
import ACL2_to_Z3
import z3
def prod(stuff):
    """ prod(stuff):
        compute the product (i.e. reduce with '*') of the elements of 'stuff'.
        'stuff' must be a non-empty iterable.

        Implemented with a plain loop instead of the Python-2-only builtin
        'reduce', so the module also works on Python 3 without imports."""
    result = None
    for item in stuff:
        result = item if result is None else result * item
    if result is None:
        raise TypeError("prod() of empty iterable")
    return result
def longVal(x):
    """ longVal(x):
        if 'x' is a z3 constant (i.e. function of arity 0) whose value is an integer,
          then return that integer as a python int
        else return 'None'

        (The comparison previously used the Python-2-only literal '1L',
        a syntax error on Python 3; a plain '1' compares equally.)"""
    if hasattr(x, 'as_long'):
        return x.as_long()
    elif hasattr(x, 'numerator_as_long'):
        # rationals: only whole values (denominator == 1) count as integers
        if x.denominator_as_long() == 1:
            return x.numerator_as_long()
    return None
class to_smt_w_expt(ACL2_to_Z3.ACL22SMT):
class ExptRewriteFailure(Exception): pass
def __init__(self, *args):
super(to_smt_w_expt, self).__init__(*args)
# I'm making the exponent have sort Real instead of Int because
# the translator turns integerp to isReal! That's because the z3
# solver (understandably) chokes on mixed integer/real polynomials.
self.expt = z3.Function('EXPT', z3.RealSort(), z3.RealSort(), z3.RealSort())
# self.b_sum = z3.Function('b_sum', z3.RealSort(), z3.RealSort(), z3.RealSort(), z3.RealSort(), z3.RealSort(), z3.RealSort(), z3.RealSort())
# self.b_expt = z3.Function('b_expt', z3.RealSort(), z3.RealSort(), z3.RealSort())
self.maxPowExpand = 10
def simplify(self, expr, **kwargs):
if(z3.is_expr(expr)): return z3.simplify(expr, **kwargs)
else: # assume that expr has already been 'simplified' to a constant.
return expr
def reportFun(self, report=None):
def print_msg(*args):
print ''.join([str(a) for a in args])
return None
def dont_print_msg(*args):
return None
if((report is None) or (report is False)): return dont_print_msg
elif(report is True): return print_msg
else: return report
def get_expt_rules(self, expr_list, report=None):
if(len(expr_list) == 0): return []
else: hyps = expr_list[0]
workQ = collections.deque() # expt calls we still need to examine
allQ = collections.deque() # all expt calls that we've seen
report = self.reportFun(report)
def enqueue(v):
# z3 ASTs are unhashable; so we'll use a brute-force
# list for now -- beware of the quadratic time to build the
# allQ and workQ lists if we ever work on big examples.
report('enque(', v, ')')
for w in allQ:
if(v.eq(w)): # have we already seen v ?
report(' already seen, no work to do')
| return
report | (' appending ', v, ' to allQ and workQ')
allQ.append(v)
workQ.append(v)
def xpt(x, n):
v = self.expt(x, n)
enqueue(v)
return v
def lookfor_expt(v):
if(v is None): return
elif(hasattr(v, "decl") and hasattr(v, "children")):
# hopefully, v is a z3 expression
if(v.decl().eq(self.expt)):
x = v.children()[0]
n = v.children()[1]
enqueue(self.expt(x, self.simplify(n, som=True)))
for nu in v.children(): lookfor_expt(nu)
def expt_rules():
rules = collections.deque()
solver = z3.Solver()
solver.set('arith.nl', False)
solver.add(hyps)
def show(p):
report('trying to show(', p, '):')
report(' hypotheses = ', solver)
solver.push()
solver.add(z3.Not(p))
outcome = solver.check()
s1 = ' the negation is ' + str(outcome)
if(outcome == z3.unsat):
report(s1, "; therefore the original claim is valid")
elif(outcome == z3.sat):
report(s1, "\n here's a counter-example to ", p, "\n ", solver.model())
elif(outcome == z3.unknown):
report(s1, "; therefore, the original claim is undecided")
else:
report(s1, "; how'd that happen?")
solver.pop()
return outcome == z3.unsat
def add_rule(p):
report('add_rule(', p, ')')
rules.append(p)
solver.add(p)
while(len(workQ) > 0):
v = workQ.pop()
x = v.children()[0]
n = v.children()[1]
report('rewriting expt(', x, ', ', n, ')')
# Many of the rules below should have guards to ensure that we don't
# accidentally say expt(x, n) is defined when x==0 and n < 0.
# Rather that figuring out # all of the corner cases, I first check to
# see if (x == 0) and (n < 0) is satisfiable. If so, this code just
# throws an exception. I could probably work out a better error message
# later.
# Now that we know that expt(x, n) is well-defined, we still need to be careful.
# Consider expt(x, n+m) where x==0, n==3, and m==(-2). In this case, expt(x, n+m)
# is well-defined, but we can't conclude:
# expt(x, n+m) == expt(x, n) * expt(x, m)
# Rather than working out lots of side conditions (and probably making a mistake),
# I just check to see if implies(hyps, x > 0), and then plunge ahead without fear.
# Of course, this means I don't generate all of the rules that I could, but I'll
# do that later if this simple version turns out to be useful.
def expt_rewrite_const(x2, n2):
if(n2 == 0): return z3.intVal(1)
elif((0 < n2) and (n2 <= self.maxPowExpand)):
add_rule(v == prod(map(lambda _: x2, range(n2))))
elif((-self.maxPowExpand <= n2) and (n2 < 0)):
add_rule(v*prod(map(lambda _: x2, range(-n2))) == 1)
if(not show(z3.Or(x != 0, n >= 0))):
raise ExptRewriteFailure('possible attempt to raise 0 to a negative power')
x_is_pos = show(x > 0)
x_is_nz = x_is_pos or show(x != 0)
x_is_z = (not x_is_nz) and show(x == 0)
n_is_pos = show(n > 0)
n_is_neg = (not n_is_pos) and show(n < 0)
n_is_z = (not n_is_pos) and (not n_is_neg) and show(n == 0)
if(n_is_z or x_is_z):
if(n_is_z): add_rule(v == 1)
elif(n_is_pos): add_rule(v == 0)
else: add_rule(v == z3.If(n == 0, 1, 0))
continue
elif(x_is_pos):
x_lt_1 = show(x < 1)
x_gt_1 = (not x_lt_1) and show(x > 1)
if((not x_lt_1) and (not x_gt_1) and show(x == 1)):
add_rule(v == 1)
continue
add_rule(v > 0)
else:
add_rule(z3.Implies(x > 0, v > 0))
if(x_is_nz): add_rule(z != 0)
else: add_rule(z3.Implies(z3.Or(x != 0, n==0), v != 0))
if((x.decl().name() == '*') and (len(x.children()) > 1)): # expt(x0*x1*..., n)
add_rule(v == prod(map(lambda y: xpt(y, n), x.children())))
elif((n.decl().name() == '+') and (len(n.children()) > 1)): # expt(x, n0+n1+...)
add_rule(v == prod(map(lambda m: xpt(x, m), |
waseem18/Shorten-it | main.py | Python | mit | 2,602 | 0.036126 | import webapp2
import os
import urllib2
from google.appengine.ext import db
import jinja2
import urlparse
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
def render_str(template, **params):
    """Render the named jinja2 template with the given keyword parameters."""
    return jinja_env.get_template(template).render(params)
class BaseHandler(webapp2.RequestHandler):
    """Request handler base class with template-rendering conveniences."""

    def write(self, *a, **kw):
        """Write raw output directly to the response body."""
        self.response.out.write(*a, **kw)

    def render(self, template, **kw):
        """Render *template* with **kw and write the result to the response."""
        self.response.out.write(render_str(template, **kw))
# Base-62 digit table used to encode datastore IDs into short URL paths.
# NOTE(review): the last ten entries are ints (0-9), not strings, so reverse
# lookups must try both forms -- see the ValueError fallback in MainHandler.
alphabets = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z',0,1,2,3,4,5,6,7,8,9]
class StoreUrl(db.Model):
    """Datastore model holding one long URL; its numeric entity ID seeds the short code."""
    lurl = db.StringProperty(required=True)
class MainHandler(BaseHandler):
    """Serves the submission form and handles URL-shortening posts."""

    def get(self):
        """Show the URL-submission form."""
        self.render('welcome.html')

    def post(self):
        """Store the submitted URL and display its base-62 short form."""
        lurl = self.request.get('lurl')
        lurl_instance = StoreUrl(lurl=lurl)
        lurl_instance.put()
        k = lurl_instance.key()
        idd = k.id()  # numeric datastore ID: the seed for the short code
        # Convert the decimal ID into BASE62 form.
        # NOTE(review): the modulo silently truncates IDs >= 1000000 (so
        # distinct URLs can collide on the same short code) and yields an
        # empty path when idd is a multiple of 1000000 -- confirm intent.
        idd_c = idd%1000000
        id_list = []
        while(idd_c > 0):
            rem = idd_c%62
            id_list.append(rem)
            idd_c = idd_c/62
        id_list.reverse()
        i=0
        final_url = "http://shorten-it-app.appspot.com/"
        while i<len(id_list):
            x = alphabets[id_list[i]]
            i = i+1
            final_url = final_url + str(x)
        j = StoreUrl.get_by_id(idd)
        redirection_url = j.lurl
        self.render('results.html',redirection_url=redirection_url,final_url=final_url)
        # Reverse lookup: recover the ID digits from the short path.
        # NOTE(review): short_path_id is computed but never used or stored;
        # the redirect mapping is therefore incomplete -- confirm.
        short_path_id = []
        path = urlparse.urlparse(final_url).path
        j = 1
        while j<len(path):
            try:
                short_path_id.append(alphabets.index(path[j]))
                j = j + 1
            except ValueError:
                # digits are stored as ints in 'alphabets', so retry as int
                short_path_id.append(alphabets.index(int(path[j])))
                j = j + 1
# WSGI entry point: route all requests for "/" to MainHandler.
app = webapp2.WSGIApplication([
    ('/', MainHandler)
], debug=True)
|
Pexego/sale_commission | __unported__/full_stock_traceability/mrp_production.py | Python | agpl-3.0 | 4,398 | 0.005231 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 Pexego (<www.pexego.es>). All Rights Reserved
# $Omar Castiñeira Saavedra$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
import netsvc
class mrp_production(osv.osv):
    _inherit = 'mrp.production'

    def action_confirm(self, cr, uid, ids, context=None):
        """extends this method for managing no_procurement_moves

        After the standard confirmation, moves for miscible or
        no-procurement products have their procurements removed and the
        moves themselves deleted; tracked products get a default
        production lot (splitting the move when required).
        """
        if context is None: context = {}
        picking_id = super(mrp_production, self).action_confirm(cr, uid, ids, context=context)
        to_delete_moves = []
        procurements_to_delete = []
        picking_obj = self.pool.get('stock.picking').browse(cr, uid, picking_id)
        for move in picking_obj.move_lines:
            if move.product_id.miscible or move.product_id.not_do_procurement:
                #not procurement
                procurements = self.pool.get('procurement.order').search(cr, uid, [('move_id', '=', move.id)])
                if procurements:
                    procurements_to_delete.extend(procurements)
                if move.move_dest_id:
                    # Redirect the destination move to the raw-material location.
                    # FIX: restored "[move.move_dest_id.id]" (was corrupted source).
                    self.pool.get('stock.move').write(cr, uid, [move.move_dest_id.id], {'location_id': move.product_id.product_tmpl_id.property_raw.id})
                    if move.move_dest_id.product_id.miscible:
                        self.pool.get('stock.move').write(cr, uid, [move.move_dest_id.id], {'state': 'assigned'})
                to_delete_moves.append(move.id)
            if move.product_id.track_all and not move.product_id.miscible:
                # FIX: restored "'stock.production.lot'" (was corrupted source).
                default_prodlot, prodlot_location, default_qty, split = self.pool.get('stock.production.lot').get_default_production_lot(cr, uid, move.location_id.id, move.product_id.id, self.pool.get('product.uom')._compute_qty(cr, uid, move.product_uom.id, move.product_qty, move.product_id.uom_id.id), deep=True)
                if split:
                    new_moves = self.pool.get('stock.move').move_reserve_split(cr, uid, [move.id])
                    for new_move in new_moves:
                        self.write(cr, uid, ids, {'move_lines': [(4, new_move)]})
                else:
                    vals = {}
                    if default_prodlot:
                        vals['prodlot_id'] = default_prodlot
                    if prodlot_location:
                        vals['location_id'] = prodlot_location
                    self.pool.get('stock.move').write(cr, uid, [move.id], vals)
                    if move.move_dest_id:
                        self.pool.get('stock.move').write(cr, uid, [move.move_dest_id.id], {'prodlot_id': default_prodlot})
                        if move.move_dest_id.product_id.not_do_procurement and prodlot_location:
                            self.pool.get('stock.move').write(cr, uid, [move.move_dest_id.id], {'location_id': prodlot_location})
        # Detach and drop the collected moves/procurements (draft state first so
        # unlink is allowed by the ORM), then retrigger the picking's workflow
        # when nothing is left to move.
        self.pool.get('stock.move').write(cr, uid, to_delete_moves, {'move_dest_id': False, 'state': 'draft'})
        self.pool.get('procurement.order').write(cr, uid, list(set(procurements_to_delete)), {'state': 'draft'})
        self.pool.get('procurement.order').unlink(cr, uid, list(set(procurements_to_delete)))
        self.pool.get('stock.move').unlink(cr, uid, to_delete_moves)
        picking_obj = self.pool.get('stock.picking').browse(cr, uid, picking_id)
        if not picking_obj.move_lines:
            wf_service = netsvc.LocalService("workflow")
            wf_service.trg_write(uid, 'stock.picking', picking_obj.id, cr)
        return picking_id

mrp_production()
LarsMichelsen/pmatic | tests/TestAPI.py | Python | gpl-2.0 | 1,142 | 0.000876 | #!/usr/bin/env python
# encoding: utf-8
#
# pmatic - Python API for Homematic. Easy to use.
# Copyright (C) 2016 Lars Michelsen <lm@larsmichelsen.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public | License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should | have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Add Python 3.x behaviour to 2.7
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pmatic.api import AbstractAPI
class TestAPI(AbstractAPI):
    # Minimal concrete AbstractAPI used by the test suite.
    # NOTE(review): overriding __del__ with a no-op presumably suppresses the
    # base class's destructor-time cleanup -- confirm against pmatic.api.
    def __del__(self):
        pass
|
redisca/django-redisca | redisca/template/templatetags/builtin.py | Python | mit | 833 | 0 | from django.template import Library
register = Library()
@register.simple_tag(takes_context=True)
def assign(context, **kwargs):
    """
    Usage:
        {% assign hello="Hello Django" %}
    """
    # Copy every keyword argument into the rendering context.
    for name in kwargs:
        context[name] = kwargs[name]
    return ''
@register.filter
def get(content, key):
    """
    Usage:
        {% object|get:key|get:key %}
    """
    if isinstance(content, dict):
        return content.get(key, '')
    # Everything is an instance of ``object``, so the original second
    # isinstance() check was always true; fall straight through to an
    # attribute lookup with '' as the missing-attribute default.
    # (Also restores "getattr", which was corrupted in the source.)
    return getattr(content, key, '')
@register.simple_tag()
def call(fn, *args, **kwargs):
    """
    Usage:
        {% call object.method *args **kwargs %}

    Callable function should be decorated with
    redisca.template.decorators.template_func.
    """
    # Non-callables are returned unchanged so the tag degrades gracefully.
    if callable(fn):
        return fn(*args, **kwargs)
    return fn
|
wimglenn/argboss | test_override_kwargs.py | Python | mit | 1,470 | 0.003401 | from override_kwargs import override_kwargs
from other_module import delegating_function, function
from datetime import datetime
from unittest import TestCase
def function_in_this_module(x=123):
    """hello I'm a docstring"""
    # NOTE: the surrounding assertions compare this function's __doc__ and
    # __name__ with the override_kwargs wrapper, so the docstring and the
    # default of 123 are load-bearing -- do not change them.
    return x
class MyClass(object):
    # BUG FIX: this was declared "def MyClass(object):" -- a function that
    # merely defined and discarded an inner function. The commented-out
    # override_kwargs test below patches 'MyClass.method_in_this_module',
    # which only makes sense for a class attribute, so this is now a class.
    def method_in_this_module(x=123):
        # NOTE(review): kept without ``self`` as in the original; it is
        # invoked through the class (MyClass.method_in_this_module()),
        # not on instances -- confirm intended usage.
        return x
# Exercise override_kwargs against a function in this module, functions in
# other_module, and a stdlib class; defaults are overridden only inside each
# ``with`` block and restored afterwards.
# (Two corrupted tokens restored: "override_kwargs" and "another_module".)
with override_kwargs('__main__', 'function_in_this_module', {'x': 69}) as f:
    assert function_in_this_module() == 69
    assert function_in_this_module.__doc__ == f.__doc__
    assert function_in_this_module.__name__ == f.__name__
assert function_in_this_module() == 123

# with override_kwargs('__main__', 'MyClass.method_in_this_module', {'x': 69}) as f:
#     assert method_in_this_module() == 69 == f()
#     assert method_in_this_module.__doc__ == f.__doc__
#     assert method_in_this_module.__name__ == f.__name__
# assert method_in_this_module() == 123

with override_kwargs('__main__', 'function', {'x': 69}):
    assert function() == 69
assert function() == 123

with override_kwargs('other_module', 'ClassInOtherModule.method', {'x': 69}):
    assert delegating_function() == 69
assert delegating_function() == 123

with override_kwargs('other_module', 'another_module.another_function', {0: 69}):
    assert delegating_function() == 69
assert delegating_function() == 123

# Overriding a stdlib constructor's keyword default also works.
then = datetime(year=1982, month=3, day=19)
with override_kwargs('__main__', 'datetime', {'year': 1982}):
    assert datetime(year=2014, month=3, day=19) == then
|
anhstudios/swganh | data/scripts/templates/object/draft_schematic/space/capacitor/shared_heavy_battery_mk5.py | Python | mit | 462 | 0.047619 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Intangible template for this draft schematic.

    Auto-generated SWG template factory; local changes belong between the
    MODIFICATIONS markers so regeneration does not lose them.
    """
    result = Intangible()
    # FIX: restored "result.template" (attribute name was corrupted in source).
    result.template = "object/draft_schematic/space/capacitor/shared_heavy_battery_mk5.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
openthread/silk | silk/tests/openthread/ot_test_off_mesh_route_traffic.py | Python | apache-2.0 | 8,855 | 0.002259 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test traffic flow to off-mesh addresses resulted from addition of off-mesh routes (on routers and FEDs).
"""
import enum
import random
import time
import unittest
from silk.config import wpan_constants as wpan
from silk.node.wpan_node import WpanCredentials
from silk.utils import process_cleanup
from silk.tools import wpan_table_parser
from silk.tools.wpan_util import verify_within
from silk.unit_tests.test_utils import random_string
import silk.hw.hw_resource as hwr
import silk.node.fifteen_four_dev_board as ffdb
import silk.tests.testcase as testcase
hwr.global_instance()
# Timing / route-count parameters for the test.
WAIT_TIME = 10
NUM_ROUTES = 3
NUM_ROUTES_LOCAL = 1

# On-mesh prefix plus the three off-mesh routes (one added per node) and a
# concrete destination address inside each route.
# FIX: restored "OFF_MESH_ROUTE_1" and "OFF_MESH_ADDR_2" (names were
# corrupted in the source).
ON_MESH_PREFIX = "fd00:1234::"

OFF_MESH_ROUTE_1 = "fd00:abba::"
OFF_MESH_ROUTE_2 = "fd00:cafe::"
OFF_MESH_ROUTE_3 = "fd00:baba::"

OFF_MESH_ADDR_1 = OFF_MESH_ROUTE_1 + "1"
OFF_MESH_ADDR_2 = OFF_MESH_ROUTE_2 + "2"
OFF_MESH_ADDR_3 = OFF_MESH_ROUTE_3 + "3"

POLL_INTERVAL = 400
class TestOffMeshRouteTraffic(testcase.TestCase):
# Test description: Adding off-mesh routes (on routers and FEDs) and traffic flow to off-mesh addresses.
#
# Test topology:
#
# r1 ---- r2
# | |
# | |
# fed1 sed2
#
# The off-mesh-routes are added as follows:
# - `r1` adds `OFF_MESH_ROUTE_1`,
# - `r2` adds `OFF_MESH_ROUTE_2`,
# - `fed1` adds `OFF_MESH_ROUTE_3`.
#
# Traffic flow:
# - From `sed2` to an address matching `OFF_MESH_ROUTE_1` (verify it is received on `r1`),
# - From `r1` to an address matching `OFF_MESH_ROUTE_2` (verify it is received on `r2`),
# - From `r2` to an address matching `OFF_MESH_ROUTE_3` (verify it is received on `fed1`)
#
    @classmethod
    def hardware_select(cls: 'TestOffMeshRouteTraffic'):
        # Claim four dev boards: two routers (r1, r2), one full end device
        # (fed1) and one sleepy end device (sed2) -- see topology above.
        cls.r1 = ffdb.ThreadDevBoard()
        cls.fed1 = ffdb.ThreadDevBoard()
        cls.r2 = ffdb.ThreadDevBoard()
        cls.sed2 = ffdb.ThreadDevBoard()
        cls.all_nodes = [cls.r1, cls.fed1, cls.r2, cls.sed2]
    @classmethod
    @testcase.setup_class_decorator
    def setUpClass(cls: 'TestOffMeshRouteTraffic'):
        # Check and clean up wpantund process if any left over
        process_cleanup.ps_cleanup()
        cls.hardware_select()
        for device in cls.all_nodes:
            device.set_logger(cls.logger)
            cls.add_test_device(device)
            device.set_up()
        # Randomized network credentials (presumably to avoid collisions
        # between concurrent or repeated runs -- confirm).
        cls.network_data = WpanCredentials(network_name="SILK-{0:04X}".format(random.randint(0, 0xffff)),
                                           psk="00112233445566778899aabbccdd{0:04x}".format(random.randint(0, 0xffff)),
                                           channel=random.randint(11, 25),
                                           fabric_id="{0:06x}dead".format(random.randint(0, 0xffffff)))
        cls.thread_sniffer_init(cls.network_data.channel)
    @classmethod
    @testcase.teardown_class_decorator
    def tearDownClass(cls: 'TestOffMeshRouteTraffic'):
        # Release every device registered via add_test_device in setUpClass.
        for device in cls.device_list:
            device.tear_down()
    @testcase.setup_decorator
    def setUp(self):
        # No per-test setup: the topology is built once in setUpClass.
        pass
    @testcase.teardown_decorator
    def tearDown(self):
        # No per-test teardown: devices are released in tearDownClass.
        pass
    @testcase.test_method_decorator
    def test01_disable_autoupdate_interface_address_on_ncp(self):
        for node in self.all_nodes:
            # Disable `AutoUpdateInterfaceAddrsOnNCP` feature on wpantund
            # for all nodes. This ensures that added IPv6 address (on linux
            # interface) are not pushed to NCP (and therefore are not
            # on-mesh).
            node.setprop("Daemon:IPv6:AutoUpdateInterfaceAddrsOnNCP", "false")
            # Read the property back to confirm the daemon accepted it.
            self.assertEqual(node.getprop("Daemon:IPv6:AutoUpdateInterfaceAddrsOnNCP"), "false")
    @testcase.test_method_decorator
    def test02_pairing(self):
        # Build the topology: r1 -- r2, r1 -- fed1, r2 -- sed2 (see diagram).
        # allowlisting between leader and router
        self.r1.allowlist_node(self.r2)
        self.r2.allowlist_node(self.r1)
        # allowlisting between leader and end device
        self.r1.allowlist_node(self.fed1)
        self.fed1.allowlist_node(self.r1)
        # allowlisting between router and sleepy-end-device
        self.r2.allowlist_node(self.sed2)
        self.sed2.allowlist_node(self.r2)
        # r1 forms the network and opens joining; the others join by role.
        self.r1.form(self.network_data, "router")
        self.r1.permit_join(60)
        self.wait_for_completion(self.device_list)
        self.logger.info(self.r1.ip6_lla)
        self.logger.info(self.r1.ip6_thread_ula)
        # Propagate the formed network's identifiers into the join credentials.
        self.network_data.xpanid = self.r1.xpanid
        self.network_data.panid = self.r1.panid
        self.r2.join(self.network_data, "router")
        self.wait_for_completion(self.device_list)
        self.fed1.join(self.network_data, "end-node")
        self.wait_for_completion(self.device_list)
        self.sed2.join(self.network_data, "sleepy-end-device")
        self.sed2.set_sleep_poll_interval(POLL_INTERVAL)
        self.wait_for_completion(self.device_list)
        # Later tests rely on r2 actually holding the router role.
        self.assertTrue(self.r2.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_ROUTER)
    @testcase.test_method_decorator
    def test03_verify_off_mesh_routes(self):
        # Add on-mesh prefix
        self.r1.config_gateway(ON_MESH_PREFIX)
        # The off-mesh-routes are added as follows:
        # - `r1` adds OFF_MESH_ROUTE_1,
        # - `r2` adds OFF_MESH_ROUTE_2,
        # - `fed1` adds OFF_MESH_ROUTE_3.
        self.r1.add_route_using_prefix(OFF_MESH_ROUTE_1)
        self.r1.add_ip6_address_on_interface(OFF_MESH_ADDR_1, prefix_len=64)
        self.wait_for_completion(self.device_list)
        self.r2.add_route_using_prefix(OFF_MESH_ROUTE_2)
        self.r2.add_ip6_address_on_interface(OFF_MESH_ADDR_2, prefix_len=64)
        self.wait_for_completion(self.device_list)
        self.fed1.add_route_using_prefix(OFF_MESH_ROUTE_3)
        self.fed1.add_ip6_address_on_interface(OFF_MESH_ADDR_3, prefix_len=64)
        self.wait_for_completion(self.device_list)
        # Wait till network data is updated on r1, r2, and sed2 and they all see all
        # the added off-mesh routes.
        time.sleep(WAIT_TIME)
        def check_off_mesh_routes():
            # If a node itself adds a route, the route entry will be seen twice in
            # its WPAN_THREAD_OFF_MESH_ROUTES list (one time as part of network-wide
            # network data and again as part of the local network data). Note that
            # `r1 and `r2` each add a route, while `sed2` does not.
            r1_routes = wpan_table_parser.parse_list(self.r1.get(wpan.WPAN_THREAD_OFF_MESH_ROUTES))
            self.assertEqual(len(r1_routes), NUM_ROUTES + NUM_ROUTES_LOCAL)
            r2_routes = wpan_table_parser.parse_list(self.r2.get(wpan.WPAN_THREAD_OFF_MESH_ROUTES))
            self.assertEqual(len(r2_routes), NUM_ROUTES + NUM_ROUTES_LOCAL)
            sed2_routes = wpan_table_parser.parse_list(self.sed2.get(wpan.WPAN_THREAD_OFF_MESH_ROUTES))
            self.assertEqual(len(sed2_routes), NUM_ROUTES)
        # Re-check until the route counts settle or WAIT_TIME expires.
        verify_within(check_off_mesh_routes, WAIT_TIME)
@testcase.test_method_decorator
def test04_transmit_receive(self):
# Traffic from `sed2` to `OFF_MESH_ADDR_1` (verify that it is received on`r1`).
# Traffic from `r1` to `OFF_MESH_ADDR_2` (verify that it is received on `r2`),
# Traffic from `r2` to `OFF_MESH_ADDR_3` (verify that it is received on `fed1`)
class AddressType(enum.Enum):
Prefix = 0
addresses = [
(self.sed2, self.r1, AddressType.Prefix, OFF_MESH_ADDR_1),
(self.r1, self.r2, AddressType.Prefix, OFF_MESH_ADDR_2),
(self.r2, self.fed1, AddressType.Prefix, OFF_MESH_ADDR_3),
]
|
anupcshan/buddyfs | settings.py | Python | mit | 55 | 0 | BUDDY_P | ORT = 5000
DBPATH = '.'  # base directory for local state (assumed; confirm against usage)
REPONAME = ".buddyrepo"  # on-disk repository directory name (assumed; confirm against usage)
|
skirpichev/omg | diofant/logic/__init__.py | Python | bsd-3-clause | 512 | 0 | """
Pack | age for handling logical expressions.
"""
from .boolalg import (ITE, And, Equivalent, Implies, Nand, Nor, Not, Or,
POSform, SOPform, Xor, bool_map, false, simplify_logic,
to_cnf, to_dnf, to_nnf, true)
from .inference import satisfiable
# Public API of the logic package; mirrors the two imports above.
# FIX: removed a stray "|" token that corrupted the tuple before 'satisfiable'.
__all__ = ('ITE', 'And', 'Equivalent', 'Implies', 'Nand', 'Nor', 'Not', 'Or',
           'POSform', 'SOPform', 'Xor', 'bool_map', 'false', 'simplify_logic',
           'to_cnf', 'to_dnf', 'to_nnf', 'true', 'satisfiable')
|
hellrich/coling2016 | python/compare_word_over_models.py | Python | gpl-3.0 | 2,208 | 0.000453 | import gensim
import sys
import collections
import codecs
import re
import math
from copy import deepcopy
def compare(word, topn, models):
    """Load each word2vec model and collect its top-n neighbours of *word*.

    Returns a dict mapping model path -> list of (word, similarity) pairs.
    (The unused ``counts`` and ``sim`` locals from the original were removed.)
    """
    words = {}
    for m in models:
        model = gensim.models.Word2Vec.load_word2vec_format(m, binary=True)
        words[m] = model.most_similar(word, topn=topn)
    return words
def common_with_limit(sequences, divider=2):
    """Return items of sequences[0] that appear, in order, in every other
    sequence within a positional window of +/- (max length // divider).

    Each position of the other sequences is consumed at most once, so
    duplicated items are matched one-to-one.
    """
    if len(sequences) < 2:
        raise Exception("Need multiple sequences for comparisson")
    if divider == 0:
        raise Exception("Illegal divider 0")
    window = math.floor(max(len(seq) for seq in sequences) / divider)
    others = sequences[1:]
    used = [[] for _ in others]
    shared = []
    for pos, item in enumerate(sequences[0]):
        hits = []
        # Look for an unconsumed match near `pos` in every other sequence.
        for seq_idx, seq in enumerate(others):
            lo = int(max(0, pos - window))
            hi = int(min(pos + window + 1, len(seq)))
            for cand in range(lo, hi):
                if cand not in used[seq_idx] and seq[cand] == item:
                    hits.append(cand)
                    break
        if len(hits) == len(others):
            shared.append(item)
            for seq_idx, cand in enumerate(hits):
                used[seq_idx].append(cand)
    return shared
def intersection(sequences):
    """Return the set of items common to all given sequences.

    FIX: restored "inter.intersection" (token was corrupted in the source);
    the progressive loop is replaced by a single multi-argument intersection.
    """
    if len(sequences) < 2:
        raise Exception("Need multiple sequences for comparisson")
    return set(sequences[0]).intersection(*map(set, sequences[1:]))
def main():
    """CLI entry point: argv[1] is the word, the remaining args are models.

    Prints each model's neighbour list, then for top-5/10/20 cut-offs the
    order-aware common neighbours and the plain set intersection.
    FIX: restored the "ws = ..." assignment (line was corrupted in source).
    """
    if len(sys.argv) < 2:
        raise Exception(
            "Provide 2+ arguments:\n\t1,word\n\t2+,model(s)")
    word = sys.argv[1].lower()
    models = sys.argv[2:]
    words = compare(word, 20, models)
    for key in words:
        print(words[key])
    for i in [5, 10, 20]:
        # Keep only the neighbour words (drop similarity scores).
        ws = [[word for word, sim in words[key][:i]] for key in words]
        common = common_with_limit(ws)
        print("common:", common, "--->", len(common), "@", i)
        inter = intersection(ws)
        print("intersection:", inter, "--->", len(inter), "@", i)


if __name__ == "__main__":
    main()
|
YueLinHo/Subversion | subversion/bindings/swig/python/svn/repos.py | Python | apache-2.0 | 12,959 | 0.007099 | #
# repos.py: public Python interface for repos components
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
######################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
from libsvn.repos import *
from svn.core import _unprefix_names, Pool
_unprefix_names(locals(), 'svn_repos_')
_unprefix_names(locals(), 'SVN_REPOS_')
# Export only svn_* names. Wrapped in list(): on Python 3 a bare filter()
# is a one-shot iterator, which would leave __all__ exhausted after first use.
__all__ = list(filter(lambda x: x.lower().startswith('svn_'), locals().keys()))
del _unprefix_names
# Names that are not to be exported
import svn.core as _svncore, svn.fs as _svnfs, svn.delta as _svndelta
# Available change actions
CHANGE_ACTION_MODIFY = 0   # change to an existing path (text and/or props)
CHANGE_ACTION_ADD = 1      # path added (possibly with history)
CHANGE_ACTION_DELETE = 2   # path removed
CHANGE_ACTION_REPLACE = 3  # path deleted and re-added in the same revision
class ChangedPath:
  """Value object describing a single changed path in a revision or txn.

  Attributes mirror the constructor arguments; ``action`` must be one of
  the CHANGE_ACTION_* constants (or None).
  """
  __slots__ = [ 'item_kind', 'prop_changes', 'text_changed',
                'base_path', 'base_rev', 'path', 'added', 'action',
                ]
  def __init__(self,
               item_kind, prop_changes, text_changed, base_path, base_rev,
               path, added, action=None):
    self.item_kind = item_kind
    self.prop_changes = prop_changes
    self.text_changed = text_changed
    self.base_path = base_path
    self.base_rev = base_rev
    self.path = path
    # Validate the action against the known CHANGE_ACTION_* values.
    if action not in [None, CHANGE_ACTION_MODIFY, CHANGE_ACTION_ADD,
                      CHANGE_ACTION_DELETE, CHANGE_ACTION_REPLACE]:
      raise Exception("unsupported change type")
    self.action = action
    ### it would be nice to avoid this flag. however, without it, it would
    ### be quite difficult to distinguish between a change to the previous
    ### revision (which has a base_path/base_rev) and a copy from some
    ### other path/rev. a change in path is obviously add-with-history,
    ### but the same path could be a change to the previous rev or a restore
    ### of an older version. when it is "change to previous", I'm not sure
    ### if the rev is always repos.rev - 1, or whether it represents the
    ### created or time-of-checkout rev. so... we use a flag (for now)
    ### Note: This flag is also set for replaced paths unlike self.action
    ### which is either set to CHANGE_ACTION_ADD or CHANGE_ACTION_REPLACE
    self.added = added
class ChangeCollector(_svndelta.Editor):
"""An editor that, when driven, walks a revision or a transaction and
incrementally invokes a callback with ChangedPath instances corresponding to
paths changed in that revision.
Available Since: 1.2.0
"""
# BATON FORMAT: [path, base_path, base_rev]
  def __init__(self, fs_ptr, root, pool=None, notify_cb=None):
    """Construct a walker over the svn_fs_root_t ROOT, which must
    be in the svn_fs_t FS_PTR. Invoke NOTIFY_CB with a single argument
    of type ChangedPath for each change under ROOT.
    At this time, two ChangedPath objects will be passed for a path that had
    been replaced in the revision/transaction. This may change in the future.
    ### Can't we deduce FS_PTR from ROOT?
    ### POOL is unused
    """
    self.fs_ptr = fs_ptr
    self.changes = { } # path -> ChangedPathEntry()
    self.roots = { } # revision -> svn_svnfs_root_t
    self.notify_cb = notify_cb  # may be None: changes are then only collected
    self.props = { }
    self.fs_root = root
    # Figger out the base revision and root properties.
    if _svnfs.is_revision_root(self.fs_root):
      rev = _svnfs.revision_root_revision(self.fs_root)
      self.base_rev = rev - 1
      self.props = _svnfs.revision_proplist(self.fs_ptr, rev)
    else:
      txn_name = _svnfs.txn_root_name(self.fs_root)
      txn_t = _svnfs.open_txn(self.fs_ptr, txn_name)
      self.base_rev = _svnfs.txn_base_revision(txn_t)
      self.props = _svnfs.txn_proplist(txn_t)
  def get_root_props(self):
    # Revision (or transaction) properties captured at construction time.
    return self.props
  def get_changes(self):
    # path -> ChangedPath mapping accumulated while the editor is driven.
    return self.changes
def _send_change(self, path):
if self.notify_cb:
change = self.changes.get(path)
if change:
self.notify_cb(change)
def _make_base_path(self, parent_path, path):
idx = path.rfind('/')
if parent_path:
parent_path = parent_path + '/'
if idx == -1:
return parent_path + path
return parent_path + path[idx+1:]
def _get_root(self, rev):
try:
return self.roots[rev]
except KeyError:
pass
root = self.roots[rev] = _svnfs.revision_root(self.fs_ptr, rev)
return root
  def open_root(self, base_revision, dir_pool=None):
    # Editor entry point; baton format is (path, base_path, base_rev).
    return ('', '', self.base_rev) # dir_baton
  def delete_entry(self, path, revision, parent_baton, pool=None):
    # Record a deletion. The node kind is probed in the parent's base
    # revision because the path no longer exists in the edited tree.
    base_path = self._make_base_path(parent_baton[1], path)
    if _svnfs.is_dir(self._get_root(parent_baton[2]), base_path):
      item_type = _svncore.svn_node_dir
    else:
      item_type = _svncore.svn_node_file
    self.changes[path] = ChangedPath(item_type,
                                     False,
                                     False,
                                     base_path, # base_path
                                     parent_baton[2], # base_rev
                                     path, # path
                                     False, # added
                                     CHANGE_ACTION_DELETE,
                                     )
    self._send_change(path)
def add_directory(self, path, parent_baton,
copyfrom_path, copyfrom_revision, dir_pool=None):
action = path in self.changes and CHANGE_ACTION_REPLACE \
or CHANGE_ACTION_ADD
self.changes[path] = ChangedPath(_svncore.svn_node_dir,
False,
False,
copyfrom_path, # base_path
copyfrom_revision, # base_rev
path, # path
True, # added
action,
)
if copyfrom_path and (copyfrom_revision != -1):
base_path = copyfrom_path
else:
base_path = path
base_rev = copyfrom_revision
return (path, base_path, base_rev) # dir_baton
  def open_directory(self, path, parent_baton, base_revision, dir_pool=None):
    # Descend into an existing directory, carrying the translated base path.
    base_path = self._make_base_path(parent_baton[1], path)
    return (path, base_path, parent_baton[2]) # dir_baton
def change_dir_prop(self, dir_baton, name, value, pool=None):
dir_path = dir_baton[0]
if dir_path in self.changes:
self.changes[dir_path].prop_changes = True
else:
# can't be added or deleted, so this must be CHANGED
self.changes[dir_path] = ChangedPath(_svncore.svn_node_dir,
True,
False,
dir_baton[1], # base_path
| dir_baton[2], # base_rev
dir_path, # path
| False, # added
CHANGE_ACTION_MODIFY,
)
def add_file(self, path, parent_baton,
copyfrom_path, copyfrom_revision, file_pool=None):
action = path in self.changes and CHANGE_ACTION_REPLACE \
or CHAN |
pyfa-org/eos | eos/eve_obj/effect/repairs/ship_module_ancillary_remote_shield_booster.py | Python | lgpl-3.0 | 1,564 | 0 | # ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free So | ftware Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FI | TNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
import math
from eos.const.eve import AttrId
from eos.const.eve import EffectId
from eos.eve_obj.effect import EffectFactory
from eos.eve_obj.effect.helper_func import get_cycles_until_reload_generic
from .base import RemoteShieldRepairEffect
class ShipModuleAncillaryRemoteShieldBooster(RemoteShieldRepairEffect):
    """Ancillary remote shield booster effect for ship modules."""
    def get_cycles_until_reload(self, item):
        # default=inf: presumably means unlimited cycling when no charge /
        # reload data is present -- confirm against the generic helper.
        return get_cycles_until_reload_generic(item, default=math.inf)
    def get_rep_amount(self, item):
        # The shield_bonus attribute drives the repair amount; 0 when absent.
        return item.attrs.get(AttrId.shield_bonus, 0)
# Register this class so EffectFactory instantiates it for its effect id.
EffectFactory.register_class_by_id(
    ShipModuleAncillaryRemoteShieldBooster,
    EffectId.ship_module_ancillary_remote_shield_booster)
|
sourabhg/HelpSet | app/Utils/__init__.py | Python | gpl-2.0 | 39 | 0.076923 | __al | l__=['ResponseWriter','serverSets' | ] |
qtproject/pyside-pyside | tests/QtMultimediaWidgets/qmultimediawidgets.py | Python | lgpl-2.1 | 2,146 | 0.010718 | #############################################################################
##
## Copyright (C) 2017 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
'''Test cases for QtMultimediaWidgets'''
import unittest
from helper import UsesQApplication
from PySide2.QtMultimediaWidgets import QGraphicsVideoItem, QVideoWidget
from PySide2.QtWidgets import QGraphicsScene, QGraphicsView, QVBoxLayout, QWidget
from PySide2.QtCore import QTimer
class MyWidget(QWidget):
    """Widget stacking a QVideoWidget above a QGraphicsView whose scene
    contains a QGraphicsVideoItem.

    FIX: restored "QWidget.__init__(self)" and "QGraphicsVideoItem()",
    both of which were corrupted by stray tokens in the source.
    """
    def __init__(self):
        QWidget.__init__(self)
        layout = QVBoxLayout(self)
        layout.addWidget(QVideoWidget())

        graphicsScene = QGraphicsScene()
        graphicsView = QGraphicsView(graphicsScene)
        graphicsScene.addItem(QGraphicsVideoItem())
        layout.addWidget(graphicsView)
class QMultimediaWidgetsTest(UsesQApplication):
    """Smoke test: build MyWidget, show it, and spin the event loop briefly."""
    def testMultimediaWidgets(self):
        w = MyWidget()
        w.show()
        # QTimer.singleShot is a static scheduling helper that returns None;
        # the original bound that None to an unused "timer" name, which was
        # misleading -- just schedule the quit and run the loop.
        QTimer.singleShot(100, self.app.quit)
        self.app.exec_()

if __name__ == '__main__':
    unittest.main()
|
graphql-python/graphql-epoxy | tests/test_register_enum.py | Python | mit | 519 | 0.001927 | from graphql.core.type.definition import GraphQLEnumType
from epoxy.registry import TypeRegistry
from enum import Enum
def test_register_builtin_enum():
    """Registering a stdlib Enum yields a GraphQLEnumType with matching
    names and values.

    FIX: restored "class MyEnum(Enum):" and "for v in values", both of
    which were corrupted by stray tokens in the source.
    """
    R = TypeRegistry()

    @R
    class MyEnum(Enum):
        FOO = 1
        BAR = 2
        BAZ = 3

    enum = R.type('MyEnum')
    assert isinstance(enum, GraphQLEnumType)
    values = enum.get_values()
    assert [v.name for v in values] == ['FOO', 'BAR', 'BAZ']
    assert [v.value for v in values] == [MyEnum.FOO.value, MyEnum.BAR.value, MyEnum.BAZ.value]
|
googleapis/synthtool | synthtool/gcp/gapic_bazel.py | Python | apache-2.0 | 13,771 | 0.001234 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Optional, Union
import os
import shutil
import tempfile
from synthtool import _tracked_paths, metadata, shell
from synthtool.log import logger
from synthtool.sources import git
# Clone URLs for the public/private googleapis repositories and the two
# Discovery-based source repositories.
GOOGLEAPIS_URL: str = git.make_repo_clone_url("googleapis/googleapis")
GOOGLEAPIS_PRIVATE_URL: str = git.make_repo_clone_url("googleapis/googleapis-private")
GOOGLEAPIS_DISCOVERY_URL: str = git.make_repo_clone_url(
    "googleapis/googleapis-discovery"
)
DISCOVERY_ARTIFACT_MANAGER_URL: str = git.make_repo_clone_url(
    "googleapis/discovery-artifact-manager"
)
# Optional environment overrides pointing at local checkouts, used instead
# of cloning when set.
LOCAL_GOOGLEAPIS: Optional[str] = os.environ.get("SYNTHTOOL_GOOGLEAPIS")
LOCAL_GOOGLEAPIS_DISCOVERY: Optional[str] = os.environ.get(
    "SYNTHTOOL_GOOGLEAPIS_DISCOVERY"
)
LOCAL_DISCOVERY_ARTIFACT_MANAGER: Optional[str] = os.environ.get(
    "SYNTHTOOL_DISCOVERY_ARTIFACT_MANAGER"
)
class GAPICBazel:
"""A synthtool component that can produce libraries using bazel build."""
    def __init__(self):
        self._ensure_dependencies_installed()
        # Lazily-cloned source repositories; populated on first use by the
        # corresponding _clone_* helpers.
        self._googleapis = None
        self._googleapis_private = None
        self._googleapis_discovery = None
        self._discovery_artifact_manager = None
    # Thin per-language wrappers around _generate_code. Only PHP surfaces
    # the clean_build flag; Java keeps the tarball's top-level directory
    # (tar_strip_components=0).
    def py_library(self, service: str, version: str, **kwargs) -> Path:
        return self._generate_code(service, version, "python", False, **kwargs)
    def go_library(self, service: str, version: str, **kwargs) -> Path:
        return self._generate_code(service, version, "go", False, **kwargs)
    def node_library(self, service: str, version: str, **kwargs) -> Path:
        return self._generate_code(service, version, "nodejs", False, **kwargs)
    def csharp_library(self, service: str, version: str, **kwargs) -> Path:
        return self._generate_code(service, version, "csharp", False, **kwargs)
    def php_library(
        self, service: str, version: str, clean_build: bool = False, **kwargs
    ) -> Path:
        return self._generate_code(service, version, "php", clean_build, **kwargs)
    def java_library(self, service: str, version: str, **kwargs) -> Path:
        return self._generate_code(
            service, version, "java", False, tar_strip_components=0, **kwargs
        )
    def ruby_library(self, service: str, version: str, **kwargs) -> Path:
        return self._generate_code(service, version, "ruby", False, **kwargs)
def _generate_code(
self,
service: str,
version: str,
language: str,
clean_build: bool = False,
*,
private: bool = False,
discogapic: bool = False,
diregapic: bool = False,
proto_path: Union[str, Path] = None,
output_dir: Union[str, Path] = None,
bazel_target: str = None,
include_protos: bool = False,
proto_output_path: Union[str, Path] = None,
tar_strip_components: int = 1,
):
# Determine which googleapis repo to use
if discogapic:
api_definitions_repo = self._clone_discovery_artifact_manager()
api_definitions_repo_name = "discovery-artifact-manager"
elif diregapic:
api_definitions_repo = self._clone_googleapis_discovery()
api_definitions_repo_name = "googleapis-discovery"
elif private:
api_definitions_repo = self._clone_googleapis_private()
api_definitions_repo_name = "googleapis_private"
else:
api_definitions_repo = self._clone_googleapis()
api_definitions_repo_name = "googleapis"
# Confidence check: We should have a googleapis repo; if we do not,
# something went wrong, and we should abort.
if not api_definitions_repo:
raise RuntimeError(
f"Unable to generate {service}, the sources repository repository"
"is unavailable."
)
# Calculate proto_path if necessary.
if not bazel_target or include_protos:
# If bazel_target is not specified explicitly, we will need
# proto_path to calculate it. If include_protos is True,
# we will need the proto_path to copy the protos.
if not proto_path:
if bazel_target:
# Calculate proto_path from the full bazel target, which is
# in the format "//proto_path:target_name
proto_path = bazel_target.split(":")[0][2:]
else:
# If bazel_target is not specified, assume the protos are
# simply under google/cloud, where the most of the protos
# usually are.
proto_path = f"google/cloud/{service}/{version}"
protos = Path(proto_path)
if protos.is_absolute():
protos = protos.relative_to("/")
# Determine bazel target based on per-language patterns
# Java: google-cloud-{{assembly_name}}-{{version}}-java
# Go: gapi-cloud-{{assembly_name}}-{{version}}-go
# Python: {{assembly_name}}-{{version}}-py
# PHP: google-cloud-{{assembly_name}}-{{version}}-php
# Node.js: {{assembly_name}}-{{version}}-nodejs
# Ruby: google-cloud-{{assembly_name}}-{{version}}-ruby
# C#: google-cloud-{{assembly_name}}-{{version}}-csharp
if not bazel_target:
# Determine where the protos we are generating actually live.
# We can sometimes (but not always) determine this from the service
# and version; in other cases, the user must provide it outright.
parts = list(protos.parts)
while len(parts) > 0 and parts[0] != "google":
parts.pop(0)
if len(parts) == 0:
raise RuntimeError(
f"Cannot determine bazel_target from proto_path {protos}."
"Please set bazel_target explicitly."
)
if language == "python":
suffix = f"{service}-{version}-py"
elif language == | "nodejs":
suffix = f"{service}-{version}-nodejs"
elif language == "go":
suffix = f"gapi-{'-'.join(parts[1:])}-go"
else:
suffix = f"{'-'.join(parts)}-{language}"
bazel_target = f"//{os.path.sep.join(parts)}:{suffix}"
# Confidence check: Do we have protos where we think we should?
if not (api_definitions_repo / protos).exists():
raise FileNotFoun | dError(
f"Unable to find directory for protos: {(api_definitions_repo / protos)}."
)
if not tuple((api_definitions_repo / protos).glob("*.proto")):
raise FileNotFoundError(
f"Directory {(api_definitions_repo / protos)} exists, but no protos found."
)
if not (api_definitions_repo / protos / "BUILD.bazel"):
raise FileNotFoundError(
f"File {(api_definitions_repo / protos / 'BUILD.bazel')} does not exist."
)
# Ensure the desired output directory exists.
# If none was provided, create a temporary directory.
if not output_dir:
output_dir = tempfile.mkdtemp()
output_dir = Path(output_dir).resolve()
# Let's build some stuff now.
cwd = os.getcwd()
os.chdir(str(api_definitions_repo))
if clean_build:
logger.debug("Cleaning Bazel cache")
shell.run(["bazel", "clean", "--expunge", "--async"])
# Log |
javacruft/pylxd | pylxd/operation.py | Python | apache-2.0 | 3,139 | 0 | # Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dateutil.parser import parse as parse_date
from pylxd import base
class LXDOperation(base.LXDBase):
    """Thin wrapper around the LXD ``/1.0/operations`` REST endpoints."""

    def _metadata(self, operation, data):
        """Return the operation's metadata dict, fetching it from the API
        when the caller did not already supply it (``data is None``)."""
        if data is None:
            (state, data) = self.connection.get_object(
                'GET', '/1.0/operations/%s' % operation)
            data = data.get('metadata')
        return data

    def operation_list(self):
        """Return the bare identifiers of all current operations."""
        (state, data) = self.connection.get_object('GET', '/1.0/operations')
        # Strip the URL prefix so only the operation id remains.
        return [operation.split('/1.0/operations/')[-1]
                for operation in data['metadata']]

    def operation_show(self, operation):
        """Return a summary dict (create/update time, status code) for a
        single operation; the metadata is fetched once and shared."""
        (state, data) = self.connection.get_object('GET', '/1.0/operations/%s'
                                                   % operation)
        metadata = data.get('metadata')
        return {
            'operation_create_time':
                self.operation_create_time(operation, metadata),
            'operation_update_time':
                self.operation_update_time(operation, metadata),
            'operation_status_code':
                self.operation_status_code(operation, metadata)
        }

    def operation_info(self, operation):
        """Return the raw (state, data) response for an operation."""
        return self.connection.get_object('GET', '/1.0/operations/%s'
                                          % operation)

    def operation_create_time(self, operation, data):
        """Return the creation time formatted as 'YYYY-MM-DD HH:MM:SS'."""
        data = self._metadata(operation, data)
        return parse_date(data['created_at']).strftime('%Y-%m-%d %H:%M:%S')

    def operation_update_time(self, operation, data):
        """Return the last-update time formatted as 'YYYY-MM-DD HH:MM:SS'."""
        data = self._metadata(operation, data)
        return parse_date(data['updated_at']).strftime('%Y-%m-%d %H:%M:%S')

    def operation_status_code(self, operation, data):
        """Return the operation's status value."""
        data = self._metadata(operation, data)
        return data['status']

    def operation_wait(self, operation, status_code, timeout):
        """Block until *operation* reaches *status_code* or *timeout*."""
        return self.connection.get_status(
            'GET', '/1.0/operations/%s/wait?status_code=%s&timeout=%s'
            % (operation, status_code, timeout))

    def operation_stream(self, operation, operation_secret):
        """Open the operation's websocket stream using its secret."""
        return self.connection.get_ws(
            'GET', '/1.0/operations/%s/websocket?secret=%s'
            % (operation, operation_secret))

    def operation_delete(self, operation):
        """Cancel/remove an operation."""
        return self.connection.get_status('DELETE', '/1.0/operations/%s'
                                          % operation)
|
henkelis/sonospy | sonospy/brisa/upnp/control_point/msearch.py | Python | gpl-3.0 | 6,719 | 0.001786 | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php or see LICENSE file.
#
# Copyright (C) 2006 Fluendo, S.A. (www.fluendo.com).
# Copyright 2006, Frank Scholz <coherence@beebits.net>
# Copyright 2007-2008 Brisa Team <brisa-develop@garage.maemo.org>
""" Contains the MSearch class which can search for devices.
"""
from brisa.core import log
from brisa.core.network import parse_http_response
from brisa.core.network_senders import UDPTransport
from brisa.core.network_listeners import UDPListener
from brisa.utils.looping_call import LoopingCall
from brisa.upnp.upnp_defaults import UPnPDefaults
# Search interval and target defaults, pulled from the central UPnP config.
DEFAULT_SEARCH_TIME = UPnPDefaults.MSEARCH_DEFAULT_SEARCH_TIME
DEFAULT_SEARCH_TYPE = UPnPDefaults.MSEARCH_DEFAULT_SEARCH_TYPE
#DEFAULT_SEARCH_TYPE = "upnp:rootdevice"
class MSearch(object):
""" Represents a MSearch. Contains some control functions for starting and
stopping the search. While running, search will be repeated in regular
intervals specified at construction or passed to the start() method.
"""
msg_already_started = 'tried to start() MSearch when already started'
msg_already_stopped = 'tried to stop() MSearch when already stopped'
def __init__(self, ssdp, start=True, interval=DEFAULT_SEARCH_TIME,
ssdp_addr='239.255.255.250', ssdp_port=1900):
""" Constructor for the MSearch class.
@param ssdp: ssdp server instance that will receive new device events
and subscriptions
@param start: if True starts the search when constructed
@param interval: interval between searchs
@param ssdp_addr: ssdp address for listening (UDP)
@param ssdp_port: ssdp port for listening (UDP)
@type ssdp: SSDPServer
@type start: boolean
@type interval: float
@type ssdp_addr: string
@type ssdp_port integer
"""
self.ssdp = ssdp
self.ssdp_addr = ssdp_addr
self.ssdp_port = ssdp_port
self.search_type = DEFAULT_SEARCH_TYPE
self.udp_transport = UDPTransport()
# self.listen_udp = UDPListener(ssdp_addr, ssdp_port,
self.listen_udp = UDPListener(ssdp_addr, 2149, # WMP is not picked up if 1900 is used for source
data_callback=self._datagram_received,
shared_socket=self.udp_transport.socket)
self.loopcall = LoopingCall(self.double_discover)
if start:
self.start(interval)
def is_running(self):
""" Returns True if the search is running (it's being repeated in the
interval given).
@rtype: boolean
"""
return self.loopcall.is_running()
def start(self, interval=DEFAULT_SEARCH_TIME,
search_type=DEFAULT_SEARCH_TYPE):
""" Starts the search.
@param interval: interval between searchs. Default is 600.0 seconds
@param search_type: type of the search, default is "ssdp:all"
@type interval: float
@type search_type: string
"""
# interval = 30.0
if not self.is_running():
self.search_type = search_type
self.listen_udp.start()
# print ">>>>>>>>> interval: " + str(interval)
self.loopcall.start(interval, now=True)
log.debug('MSearch started')
else:
log.warning(self.msg_already_started)
def stop(self):
""" Stops the search.
"""
if self.is_running():
log.debug('MSearch stopped')
self.listen_udp.stop()
self.loopcall.stop()
else:
log.warning(self.msg_already_stopped)
def destroy(self):
""" Destroys and quits MSearch.
"""
if self.is_running():
self.stop()
self.listen_udp.destroy()
self.loopcall.destroy()
self._cleanup()
def double_discover(self, search_type=DEFAULT_SEARCH_TYPE):
""" Sends a MSearch imediatelly. Each call to this method will yield a
MSearch message, that is, it won't repeat automatically.
"""
# print "<<<<<<<<< start double discover >>>>>>>>>"
self.discover(search_type)
self.discover(search_type)
# print "<<<<<<<<< end double discover >>>>>>>>>"
def discover(self, type="ssdp:all"):
# def discover(self, type="upnp:rootdevice"):
""" Mounts and sends the discover message (MSearch).
@param type: search type
@type type: string
"""
# type = "urn:schemas-upnp-org:device:MediaServer:1"
type = "upnp:rootdevice"
# req = ['M-SEARCH * HTTP/1.1',
# 'HOST: %s:%d' % (self.ssdp_addr, self.ssdp_port),
# 'MAN: "ssdp:discover"',
# 'MX: 5',
# 'ST: ' + type, '', '']
# req = '\r\n'.join(req)
req = ['M-SEARCH * HTTP/1.1',
'HOST:%s:%d' % (self.ssdp_addr, self.ssdp_port),
'MAN:"ssdp:discover"',
# 'Host:%s:%d' % (self.ssdp_addr, self.ssdp_port),
# 'Man:"ssdp:discover"',
'MX:5',
'ST:' + type, '', '', '']
req = '\r\n'.join(req)
self.udp_transport.send_data(req, self.ssdp_addr, self.ssdp_port)
def _datagram_received(self, data, (host, port)):
""" Callback for the UDPListener when messages arrive.
@param data: raw data received
@param host: host where data came from
@param port: port where data came from
@type data: string
| @type host: string
@type port: integer
"""
# print "datagram_received start"
cmd, headers = parse_http_response(data)
if cmd[0] == 'HTTP/1.1' and cmd[1] == '200':
if self.ssdp != None:
if not self.ssdp.is_known_device(headers['usn']):
| log.debug('Received MSearch answer %s,%s from %s:%s',
headers['usn'], headers['st'], host, port)
# print "_datagram_received _register"
# print "_datagram_received headers: " + str(headers)
self.ssdp._register(headers['usn'],
headers['st'],
headers['location'],
headers['server'],
headers['cache-control'])
# print " datagram_received end"
def _cleanup(self):
""" Clean up references.
"""
self.ssdp = None
self.listen_udp = None
self.loopcall = None
|
eort/OpenSesame | opensesame_extensions/quick_switcher/quick_switcher.py | Python | gpl-3.0 | 2,188 | 0.023766 | #-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.py3compat import *
from libqtopensesame.extensions import base_extension
class quick_switcher(base_extension):

	"""
	desc:
		The quick-switcher allows you to quickly navigate to items and
		functions, and to quickly activate menu actions.
	"""

	# The dialog is built lazily and cached in self.d; several structural
	# changes invalidate it (self.d = None) so it is rebuilt on next use.

	def event_startup(self):

		self.d = None

	def event_open_experiment(self, path):

		# The item list is experiment-specific, so force a full rebuild.
		self.d = None

	def event_rename_item(self, from_name, to_name):

		if self.d is None:
			return
		self.d.rename_item(from_name, to_name)

	def event_new_item(self, name, _type):

		if self.d is None:
			return
		self.d.add_item(name)

	def event_delete_item(self, name):

		if self.d is None:
			return
		self.d.delete_item(name)

	def event_purge_unused_items(self):

		self.d = None

	def event_regenerate(self):

		self.d = None

	def event_change_item(self, name):

		if self.d is None:
			return
		# Only inline scripts expose functions, so only they need a refresh.
		if self.experiment.items._type(name) == u'inline_script':
			self.d.refresh_item(name)

	def event_open_item(self, name):

		if self.d is None:
			return
		self.d.bump_item(name)

	def init_dialog(self):

		"""
		desc:
			Construct the quick-switcher dialog (expensive, hence lazy).
		"""

		self.set_busy()
		from quick_switcher_dialog.dialog import quick_switcher
		self.d = quick_switcher(self.main_window)
		self.set_busy(False)

	def activate(self):

		"""
		desc:
			Pops up the quick-switcher dialog, building it first if needed.
		"""

		if getattr(self, u'd', None) is None:
			self.init_dialog()
		self.d.items_list_widget.sortItems()
		self.d.exec_()
|
chillbear/django-role-permissions | docs/conf.py | Python | mit | 8,335 | 0.006599 | # -*- coding: utf-8 -*-
#
# django-role-permissions documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 9 09:02:16 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Core Sphinx project settings (scaffold generated by sphinx-quickstart).
extensions = [
    'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-role-permissions'
copyright = u'2013, Filipe Ximenes'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): version/release are hard-coded; keep them in sync with the
# package version declared in setup.py when cutting a release.
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-role-permissionsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'django-role-permissions.tex', u'django-role-permissions Documentation',
u'Filipe Ximenes', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-role-permissions', u'django-role-permissions Documentation',
[u'Filipe Ximenes'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-role-permissions', u'django-role-permissions Documentation',
u'Filipe Ximenes', 'django-role-permissions', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footno |
mozilla/firefox-flicks | vendor-local/lib/python/celery/tests/utilities/test_saferef.py | Python | bsd-3-clause | 1,896 | 0 | from __future__ import absolute_import
from celery.utils.dispatch.saferef import safe_ref
from celery.tests.utils import Case
class Class1(object):
    """Fixture: provides a bound method (``x``) to take safe references to."""
    def x(self):
        pass
def fun(obj):
    """Fixture: a plain function target for ``safe_ref``."""
    pass
class Class2(object):
    """Fixture: a callable object (``__call__``) target for ``safe_ref``."""
    def __call__(self, obj):
        pass
class SaferefTests(Case):
    """Exercises safe_ref() against bound methods, plain functions and
    callable objects (Python 2 code base -- note the use of ``xrange``)."""

    def setUp(self):
        # ``ts`` keeps strong references to every target so the safe
        # references collected in ``ss`` stay alive during each test.
        ts = []
        ss = []
        for x in xrange(5000):
            t = Class1()
            ts.append(t)
            s = safe_ref(t.x, self._closure)
            ss.append(s)
        ts.append(fun)
        ss.append(safe_ref(fun, self._closure))
        for x in xrange(30):
            t = Class2()
            ts.append(t)
            s = safe_ref(t, self._closure)
            ss.append(s)
        self.ts = ts
        self.ss = ss
        # Deletion-callback counter, bumped by _closure.
        # NOTE(review): assigned *after* the safe_refs are created; assumes
        # no referent is collected during setUp, or _closure would hit a
        # missing attribute -- confirm.
        self.closureCount = 0

    def tearDown(self):
        # Drop the strong references so the weak references may die.
        del self.ts
        del self.ss

    def testIn(self):
        """Test the "in" operator for safe references (cmp)"""
        for t in self.ts[:50]:
            self.assertTrue(safe_ref(t.x) in self.ss)

    def testValid(self):
        """Test that the references are valid (return instance methods)"""
        for s in self.ss:
            self.assertTrue(s())

    def testShortCircuit(self):
        """Test that creation short-circuits to reuse existing references"""
        sd = {}
        for s in self.ss:
            sd[s] = 1
        for t in self.ts:
            if hasattr(t, 'x'):
                self.assertIn(safe_ref(t.x), sd)
            else:
                self.assertIn(safe_ref(t), sd)

    def testRepresentation(self):
        """Test that the reference object's representation works

        XXX Doesn't currently check the results, just that no error
        is raised
        """
        repr(self.ss[-1])

    def _closure(self, ref):
        """Dumb utility mechanism to increment deletion counter"""
        self.closureCount += 1
|
mrGeen/eden | modules/eden/menus.py | Python | mit | 70,805 | 0.00233 | # -*- coding: utf-8 -*-
""" Sahana Eden Menu Structure and Layout
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3MainMenu", "S3OptionsMenu"]
import re
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from layouts import *
# =============================================================================
class S3MainMenu(object):
""" The default configurations for the main application menu """
@classmethod
def menu(cls):
main_menu = MM()(
# Modules-menu, align-left
cls.menu_modules(),
# Service menus, align-right
# Note: always define right-hand items in reverse order!
cls.menu_help(right=True),
cls.menu_auth(right=True),
cls.menu_lang(right=True),
cls.menu_admin(right=True),
cls.menu_gis(right=True)
)
return main_menu
# -------------------------------------------------------------------------
@classmethod
def menu_modules(cls):
# ---------------------------------------------------------------------
# Modules Menu
# @todo: this is very ugly - cleanup or make a better solution
# @todo: probably define the menu explicitly?
#
menu_modules = []
all_modules = current.deployment_settings.modules
# Home always 1st
module = all_modules["default"]
menu_modules.append(MM(module.name_nice, c="default", f="index" | ))
auth = current.auth
# Modules to hide due to insufficient permissions
hidden_modules = auth.permission.hidden_modules()
has_role = auth.s3_has_role
# The Modules to display at the top level (in order)
for module_type in [1, 2, 3, 4, 5, 6, 7, 8, 9]:
for module in all_modules:
if module in hidden_modules: |
continue
_module = all_modules[module]
if (_module.module_type == module_type):
if not _module.access:
menu_modules.append(MM(_module.name_nice, c=module, f="index"))
else:
groups = re.split("\|", _module.access)[1:-1]
menu_modules.append(MM(_module.name_nice,
c=module,
f="index",
restrict=groups))
# Modules to display off the 'more' menu
modules_submenu = []
for module in all_modules:
if module in hidden_modules:
continue
_module = all_modules[module]
if (_module.module_type == 10):
if not _module.access:
modules_submenu.append(MM(_module.name_nice, c=module, f="index"))
else:
groups = re.split("\|", _module.access)[1:-1]
modules_submenu.append(MM(_module.name_nice,
c=module,
f="index",
restrict=groups))
if modules_submenu:
# Only show the 'more' menu if there are entries in the list
module_more_menu = MM("more", link=False)(modules_submenu)
menu_modules.append(module_more_menu)
return menu_modules
# -------------------------------------------------------------------------
@classmethod
def menu_lang(cls, **attr):
""" Language menu """
languages = current.response.s3.l10n_languages
request = current.request
settings = current.deployment_settings
if not settings.get_L10n_display_toolbar():
return None
menu_lang = MM("Language", **attr)
for language in languages:
menu_lang.append(MM(languages[language], r=request,
translate=False,
vars={"_language":language}))
return menu_lang
# -------------------------------------------------------------------------
@classmethod
def menu_help(cls, **attr):
""" Help Menu """
menu_help = MM("Help", **attr)(
MM("Contact us", c="default", f="contact"),
MM("About", c="default", f="about")
)
return menu_help
# -------------------------------------------------------------------------
    @classmethod
    def menu_auth(cls, **attr):
        """ Auth Menu

            Anonymous visitors get Login/Register/Lost-Password entries;
            logged-in users get their personal menu (profile, contact
            details, logout, rapid-data-entry toggle).
        """
        auth = current.auth
        logged_in = auth.is_logged_in()
        self_registration = current.deployment_settings.get_security_self_registration()
        if not logged_in:
            request = current.request
            # Preserve the current URL so the user returns here after login.
            login_next = URL(args=request.args, vars=request.vars)
            # On the login page itself, honour an explicit ?_next target.
            if request.controller == "default" and \
               request.function == "user" and \
               "_next" in request.get_vars:
                login_next = request.get_vars["_next"]
            menu_auth = MM("Login", c="default", f="user", m="login",
                           _id="auth_menu_login",
                           vars=dict(_next=login_next), **attr)(
                            MM("Login", m="login",
                               vars=dict(_next=login_next)),
                            # Registration only when enabled in settings.
                            MM("Register", m="register",
                               vars=dict(_next=login_next),
                               check=self_registration),
                            MM("Lost Password", m="retrieve_password")
                        )
        else:
            # Logged-in
            menu_auth = MM(auth.user.email, c="default", f="user",
                           translate=False, link=False, _id="auth_menu_email",
                           **attr)(
                            MM("Logout", m="logout", _id="auth_menu_logout"),
                            MM("User Profile", m="profile"),
                            MM("Personal Data", c="pr", f="person", m="update",
                               vars={"person.pe_id" : auth.user.pe_id}),
                            MM("Contact Details", c="pr", f="person",
                               args="contact",
                               vars={"person.pe_id" : auth.user.pe_id}),
                            #MM("Subscriptions", c="pr", f="person",
                            #args="pe_subscription",
                            #vars={"person.pe_id" : auth.user.pe_id}),
                            MM("Change Password", m="change_password"),
                            SEP(),
                            # Toggle for rapid data entry; the menu label
                            # reflects the current session flag.
                            MM({"name": current.T("Rapid Data Entry"),
                                "id": "rapid_toggle",
                                "value": current.session.s3.rapid_data_entry is True},
                               f="rapid"),
                        )
        return menu_auth
# ------------------------------------------------------ |
wylieswanson/agilepyfs | fs/expose/dokan/__init__.py | Python | bsd-3-clause | 37,866 | 0.005123 | """
fs.expose.dokan
===============
Expose an FS object to the native filesystem via Dokan.
This module provides the necessary interfaces to mount an FS object into
the local filesystem using Dokan on win32::
http://dokan-dev.net/en/
For simple usage, the function 'mount' takes an FS object and a drive letter,
and exposes the given FS as that drive::
>>> from fs.memoryfs import MemoryFS
>>> from fs.expose import dokan
>>> fs = MemoryFS()
>>> mp = dokan.mount(fs,"Q")
>>> mp.drive
'Q'
>>> mp.path
'Q:\\'
>>> mp.unmount()
The above spawns a new background process to manage the Dokan event loop, which
can be controlled through the returned subprocess.Popen object. To avoid
spawning a new process, set the 'foreground' option::
>>> # This will block until the filesystem is unmounted
>>> dokan.mount(fs,"Q",foreground=True)
Any additional options for the Dokan process can be passed as keyword arguments
to the 'mount' function.
If you require finer control over the creation of the Dokan process, you can
instantiate the MountProcess class directly. It accepts all options available
to subprocess.Popen::
>>> from subprocess import PIPE
>>> mp = dokan.MountProcess(fs,"Q",stderr=PIPE)
>>> dokan_errors = mp.communicate()[1]
If you are exposing an untrusted filesystem, you may like to apply the
wrapper class Win32SafetyFS before passing it into dokan. This will take
a number of steps to avoid suspicious operations on windows, such as
hiding autorun files.
The binding to Dokan is created via ctypes. Due to the very stable ABI of
win32, this should work without further configuration on just about all
systems with Dokan installed.
"""
# Copyright (c) 2009-2010, Cloud Matrix Pty. Ltd.
# All rights reserved; available under the terms of the MIT License.
from __future__ import with_statement
import sys
import os
import signal
import errno
import time
import stat as statinfo
import subprocess
import cPickle
import datetime
import ctypes
from collections import deque
from fs.base import threading
from fs.errors import *
from fs.path import *
from fs.local_functools import wraps
from fs.wrapfs import WrapFS
# The Dokan bindings only load on win32 with the Dokan driver installed;
# degrade gracefully everywhere else and record the result in is_available.
try:
    import libdokan
except (NotImplementedError,EnvironmentError,ImportError,NameError,):
    is_available = False
    # Drop the partially-imported submodule from sys.modules, presumably so
    # a later import attempt starts clean -- TODO confirm.
    sys.modules.pop("fs.expose.dokan.libdokan",None)
    libdokan = None
else:
    is_available = True
    from ctypes.wintypes import LPCWSTR, WCHAR
    kernel32 = ctypes.windll.kernel32
import logging
logger = logging.getLogger("fs.expose.dokan")
# Options controlling the behavior of the Dokan filesystem
DOKAN_OPTION_DEBUG = 1
DOKAN_OPTION_STDERR = 2
DOKAN_OPTION_ALT_STREAM = 4
DOKAN_OPTION_KEEP_ALIVE = 8
DOKAN_OPTION_NETWORK = 16
DOKAN_OPTION_REMOVABLE = 32
# Error codes returned by DokanMain
DOKAN_SUCCESS = 0
DOKAN_ERROR = -1
DOKAN_DRIVE_LETTER_ERROR = -2
DOKAN_DRIVER_INSTALL_ERROR = -3
DOKAN_START_ERROR = -4
DOKAN_MOUNT_ERROR = -5
# Misc windows constants
FILE_LIST_DIRECTORY = 0x01
FILE_SHARE_READ = 0x01
FILE_SHARE_WRITE = 0x02
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_FLAG_OVERLAPPED = 0x40000000
FILE_ATTRIBUTE_ARCHIVE = 32
FILE_ATTRIBUTE_COMPRESSED = 2048
FILE_ATTRIBUTE_DIRECTORY = 16
FILE_ATTRIBUTE_HIDDEN = 2
FILE_ATTRIBUTE_NORMAL = 128
FILE_ATTRIBUTE_OFFLINE = 4096
FILE_ATTRIBUTE_READONLY = 1
FILE_ATTRIBUTE_SYSTEM = 4
# BUG FIX: this was 4, which is the value of FILE_ATTRIBUTE_SYSTEM.  The
# win32 constant FILE_ATTRIBUTE_TEMPORARY is 0x100 (256).
FILE_ATTRIBUTE_TEMPORARY = 256
CREATE_NEW = 1
CREATE_ALWAYS = 2
OPEN_EXISTING = 3
OPEN_ALWAYS = 4
TRUNCATE_EXISTING = 5
FILE_GENERIC_READ = 1179785
FILE_GENERIC_WRITE = 1179926
REQ_GENERIC_READ = 0x80 | 0x08 | 0x01
REQ_GENERIC_WRITE = 0x004 | 0x0100 | 0x002 | 0x0010
ERROR_ACCESS_DENIED = 5
ERROR_LOCK_VIOLATION = 33
ERROR_NOT_SUPPORTED = 50
ERROR_FILE_EXISTS = 80
ERROR_DIR_NOT_EMPTY = 145
ERROR_NOT_LOCKED = 158
ERROR_LOCK_FAILED = 167
ERROR_ALREADY_EXISTS = 183
ERROR_LOCKED = 212
ERROR_INVALID_LOCK_RANGE = 306
# Some useful per-process global information
NATIVE_ENCODING = sys.getfilesystemencoding()
DATETIME_ZERO = datetime.datetime(1,1,1,0,0,0)
DATETIME_STARTUP = datetime.datetime.utcnow()
# Offset between the win32 FILETIME epoch (1601-01-01) and the unix epoch,
# expressed in 100-nanosecond intervals.
FILETIME_UNIX_EPOCH = 116444736000000000
def handle_fs_errors(func):
"""Method decorator to report FS errors in the appropriate way.
This decorator catches all FS errors and translates them into an
equivalent OSError, then returns the negated error number. It also
makes the function return zero instead of None as an indication of
successful execution.
"""
name = func.__name__
func = convert_fs_errors(func)
@wraps(func)
def wrapper(*args,**kwds):
try:
res = func(*args,**kwds)
except OSError, e:
if e.errno:
res = -1 * _errno2syserrcode(e.errno)
else:
res = -1
except Exception, e:
raise
else:
if res is None:
res = 0
return res
return wrapper
# During long-running operations, Dokan requires that the DokanResetTimeout
# function be called periodically to indicate the progress is still being
# made. Unfortunately we don't have any facility for the underlying FS
# to make these calls for us, so we have to hack around it.
#
# The idea is to use a single background thread to monitor all active Dokan
# method calls, resetting the timeout until they have completed. Note that
# this completely undermines the point of DokanResetTimeout as it's now
# possible for a deadlock to hang the entire filesystem.
_TIMEOUT_PROTECT_THREAD = None
_TIMEOUT_PROTECT_LOCK = threading.Lock()
_TIMEOUT_PROTECT_COND = threading.Condition(_TIMEOUT_PROTECT_LOCK)
_TIMEOUT_PROTECT_QUEUE = deque()
_TIMEOUT_PROTECT_WAIT_TIME = 4 * 60
_TIMEOUT_PROTECT_RESET_TIME = 5 * 60 * 1000
def _start_timeout_protect_thread():
    """Start the background thread used to protect dokan from timeouts.

    Safe to call any number of times: the module-level lock guarantees
    that at most one monitor thread is ever created.
    """
    global _TIMEOUT_PROTECT_THREAD
    with _TIMEOUT_PROTECT_LOCK:
        if _TIMEOUT_PROTECT_THREAD is not None:
            return
        monitor = threading.Thread(target=_run_timeout_protect_thread)
        monitor.daemon = True
        _TIMEOUT_PROTECT_THREAD = monitor
        monitor.start()
def _run_timeout_protect_thread():
    """Body of the timeout-protect thread.

    Repeatedly pops the oldest in-flight dokan call off the queue and, if it
    is still running after the wait interval, resets its dokan timeout and
    requeues it so it keeps being refreshed.
    """
    while True:
        with _TIMEOUT_PROTECT_COND:
            try:
                (when,info,finished) = _TIMEOUT_PROTECT_QUEUE.popleft()
            except IndexError:
                # No calls in flight; block until timeout_protect() notifies.
                _TIMEOUT_PROTECT_COND.wait()
                continue
        if finished:
            # The call completed while its entry sat in the queue.
            continue
        # Sleep (outside the lock) until the entry is _WAIT_TIME old.
        now = time.time()
        wait_time = max(0,_TIMEOUT_PROTECT_WAIT_TIME - now + when)
        time.sleep(wait_time)
        with _TIMEOUT_PROTECT_LOCK:
            if finished:
                continue
            # Still running: push the dokan timeout out again and requeue.
            libdokan.DokanResetTimeout(_TIMEOUT_PROTECT_RESET_TIME,info)
            _TIMEOUT_PROTECT_QUEUE.append((now+wait_time,info,finished))
def timeout_protect(func):
    """Method decorator to enable timeout protection during call.

    This decorator adds an entry to the timeout protect queue before executing
    the function, and marks it as finished when the function exits.
    """
    @wraps(func)
    def wrapper(self,*args):
        if _TIMEOUT_PROTECT_THREAD is None:
            # Lazily start the single background protect thread on first use.
            _start_timeout_protect_thread()
        # Dokan passes the operation's info struct as the last argument.
        info = args[-1]
        # Mutable flag shared with the protect thread; a non-empty list
        # means "this call has finished".
        finished = []
        try:
            with _TIMEOUT_PROTECT_COND:
                _TIMEOUT_PROTECT_QUEUE.append((time.time(),info,finished))
                _TIMEOUT_PROTECT_COND.notify()
            return func(self,*args)
        finally:
            with _TIMEOUT_PROTECT_LOCK:
                finished.append(True)
    return wrapper
MIN_FH = 100
class FSOperations(object):
"""Object delegating all DOKAN_OPERATIONS pointers to an FS object."""
def __init__(self, fs, fsname="Dokan FS", volname="Dokan Volume"):
if libdokan is None:
msg = "dokan library (http://dokan-dev.net/en/) is not available"
raise OSError(msg)
self.fs = |
chrissmall22/odl-client | odlclient/datatypes.py | Python | apache-2.0 | 43,110 | 0.00007 | #!/usr/bin/env python
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Python Data Types used for the REST objects """
import json
# Ethernet payload type names recognised by the REST API.
ETHERNET = ['ipv4', 'arp', 'rarp', 'snmp', 'ipv6',
            'mpls_u', 'mpls_m', 'lldp', 'pbb', 'bddp']
# Supported OpenFlow protocol versions.
# BUG FIX: the last entry was '1.3.0)' (stray parenthesis), which would make
# any comparison against version 1.3.0 fail.
VERSION = ['1.0.0', '1.1.0', '1.2.0', '1.3.0']
ACTIONS = ['output',
'set_vlan_vid',
'set_vlan_pcp',
'strip_vlan',
'set_dl_src',
'set_dl_dst',
'set_nw_src',
'set_nw_dst',
'set_nw_tos',
'set_tp_src',
'set_tp_dst',
'enqueue']
CAPABILITIES = ['flow_stats',
'table_stats',
'port_stats',
'stp',
'group_stats',
'reserved',
'ip_reasm',
'queue_stats',
'arp_match_ip',
'port_blocked'
]
PORT_CONFIG = ["port_down",
"no_stp",
"no_recv",
"ro_recv_stp",
"no_flood",
"no_fwd",
"no_packet_in"
]
PORT_STATE = ["link_down",
"blocked",
"live",
"stp_listen",
"stp_learn",
"stp_forward",
"stp_block"
]
PORT_FEATURES = ["rate_10mb_hd",
"rate_10mb_fd",
"rate_100mb_hd",
"rate_100mb_fd",
"rate_1gb_hd",
"rate_1gb_fd",
"rate_10gb_fd",
"rate_40gb_fd",
"rate_100gb_fd",
"rate_1tb_fd",
"rate_other",
"copper",
"fiber",
"autoneg",
"pause",
"pause_asym"
]
FLOW_MOD_CMD = ["add",
"modify",
"modify_strict",
"delete",
"delete_strict"
]
FLOW_MOD_FLAGS = ["send_flow_rem",
"check_overlap",
"emerg",
"reset_counts",
"no_packet_counts",
"no_byte_counts"]
IP_PROTOCOL = ["tcp",
"udp",
"sctp",
"icmp",
"ipv6-icmp"
]
ICMP_V6_TYPE = ["nbr_sol", "nbr_adv"]
MATCH_MODE = ["none", "present", "exact"]
IPV6_EXTHDR = ["no_next",
"esp",
"auth",
"dest",
"frag",
"router",
"hop",
"un_rep",
"un_seq"]
METER_FLAGS = ["kbps",
"pktps",
"burst",
"stats"]
METER_TYPE = ["drop", "dscp_remark", "experimenter"]
GROUP_TYPE = ["all", "select", "indirect", "ff"]
COMMANDS = ["add", "modify", "delete"]
LINK_STATE = ["link_down",
"blocked",
"live",
"stp_listen",
"stp_learn",
"stp_forward",
"stp_block"
]
OPERATION = ["ADD", "CHANGE", "DELETE", "MOVE"]
# All enumeration lists bundled together (presumably for generic validation
# of incoming values -- confirm against the callers).
# NOTE(review): ICMP_V6_TYPE and MATCH_MODE each appear twice in this list;
# this looks like a copy/paste slip, but removing them would change the list
# length, so they are left untouched here pending confirmation.
ENUMS = [ETHERNET,
         VERSION,
         ACTIONS,
         CAPABILITIES,
         PORT_CONFIG,
         PORT_STATE,
         PORT_FEATURES,
         FLOW_MOD_CMD,
         FLOW_MOD_FLAGS,
         IP_PROTOCOL,
         ICMP_V6_TYPE,
         MATCH_MODE,
         ICMP_V6_TYPE,
         MATCH_MODE,
         IPV6_EXTHDR,
         METER_FLAGS,
         METER_TYPE,
         GROUP_TYPE,
         COMMANDS,
         LINK_STATE,
         OPERATION
         ]
# Method names shared by every JsonObject subclass.
METHODS = ["factory", "to_json_string", "to_dict"]
# JSON keys that collide with Python keywords; JsonObjectFactory renames
# them by appending '_' before handing the dict to a factory.
KEYWORDS = ["self"]

# Singular JSON member name -> name of the class that deserializes it.
JSON_MAP = {'datapath': 'Datapath',
            'meter_features': 'MeterFeatures',
            'group_features': 'GroupFeatures',
            'port': 'Port',
            'meter': 'Meter',
            'flow': 'Flow',
            'group': 'Group',
            'cluster': 'Cluster',
            'packet': 'Packet',
            'path': 'Path',
            'app': 'App',
            'license': 'License',
            'support_report': None,
            'observation': 'Observation',
            'nexthop': 'NextHop'
            }

# Plural JSON member name -> deserializer class name.
# BUG FIX: the 'packets' entry was garbled in the source ("JSON_M | AP"),
# which is a SyntaxError; restored to JSON_MAP['packet'].
PLURALS = {'datapaths': JSON_MAP['datapath'],
           'controller_stats': 'ControllerStats',
           'stats': 'Stats',
           'ports': JSON_MAP['port'],
           'meters': JSON_MAP['meter'],
           'flows': JSON_MAP['flow'],
           'groups': JSON_MAP['group'],
           'clusters': JSON_MAP['cluster'],
           'links': 'Link',
           'nodes': 'Node',
           'arps': 'Arp',
           'lldp_suppressed': 'LldpProperties',
           'observations': JSON_MAP['observation'],
           'packets': JSON_MAP['packet'],
           'apps': JSON_MAP['app'],
           'licenses': JSON_MAP['license'],
           'paths': JSON_MAP['path'],
           'nexthops': JSON_MAP['nexthop']
           }
# Per-class mapping of nested attribute name -> class used to deserialize it.
# BUG FIX: the source had a stray ' |' after the 'actions' entry (a
# SyntaxError); removed.
CLASS_MAP = {'ControllerStats': {'lost': 'Counter',
                                 'packet_in': 'Counter',
                                 'packet_out': 'Counter'},
             'Team': {'systems': 'TeamSystem'},
             'Flow': {'match': 'Match',
                      'actions': 'Action',
                      'instructions': 'Instruction'},
             'Stats': {'port_stats': 'PortStats',
                       'group_stats': 'GroupStats',
                       'meter_stats': 'MeterStats'},
             'Packet': {'eth': 'Ethernet',
                        'ip': 'Ip',
                        'ipv6': 'Ipv6',
                        'udp': 'Udp',
                        'tcp': 'Tcp',
                        'dhcp': 'Dhcp',
                        'icmp': 'Icmp',
                        'icmpv6': 'Icmpv6'}
             }
class JsonObjectFactory(object):
    """Registry that instantiates JsonObject subclasses from parsed JSON."""

    factories = {}

    @staticmethod
    def add_factory(id, factory):
        """Register *factory* as the builder for type name *id*."""
        JsonObjectFactory.factories[id] = factory

    @staticmethod
    def create(id, data):
        """Build an instance of type *id* from the dict *data*.

        Keys that collide with Python keywords (see KEYWORDS) are renamed
        with a trailing underscore before being handed to the factory.
        """
        # BUG FIX: iterate over a snapshot of the keys; the original
        # iterated `data` directly while popping/adding keys, which raises
        # "dictionary changed size during iteration".
        for key in list(data):
            if key in KEYWORDS:
                data[key + "_"] = data.pop(key)
        if id not in JsonObjectFactory.factories:
            # NOTE(review): eval() of the type name is only acceptable
            # because ids come from the hard-coded JSON_MAP/PLURALS tables,
            # never from untrusted input -- confirm before widening callers.
            JsonObjectFactory.add_factory(id, eval(id))
        return JsonObjectFactory.factories[id].factory(data)
class JsonObject(object):
    """This is the base class for all HP SDN Client data types.

    Provides the JSON serialization helpers shared by every REST datatype.
    """

    def __str__(self):
        return self.to_json_string()

    def to_json_string(self):
        """Return a pretty-printed, key-sorted JSON string for this object."""
        return json.dumps(self.to_dict(), sort_keys=True,
                          indent=4, separators=(',', ': '))

    def to_dict(self):
        """Recursively convert this object into a plain dict.

        Only non-callable, non-dunder attributes whose value is not None are
        included.  Nested JsonObject instances (including inside lists) are
        converted via their own to_dict().
        """
        data = {}
        attributes = [attr for attr in dir(self)
                      if not callable(getattr(self, attr))
                      and not attr.startswith("__")]
        for attr in attributes:
            value = getattr(self, attr)
            if value is None:
                continue
            if isinstance(value, list):
                items = []
                for list_item in value:
                    if isinstance(list_item, JsonObject):
                        items.append(list_item.to_dict())
                    else:
                        items.append(list_item)
                data[attr] = items
            elif isinstance(value, JsonObject):
                data[attr] = value.to_dict()
            else:
                # The original guarded this branch with `elif type(value):`,
                # which is always true -- plain values are stored as-is.
                data[attr] = value
        return data
@classmethod
def factory(cls, data):
try:
cm = CLASS_MAP[cls.__name__]
for |
RationalAsh/configs | i3-exit.py | Python | mit | 4,584 | 0.007199 | #!/usr/bin/env python
# based on cb-exit used in CrunchBang Linux <http://crunchbanglinux.org/>
import pygtk
pygtk.require('2.0')
import gtk
import os
import getpass
import time
class i3_exit:
    """GTK confirmation dialog for leaving an i3 session.

    Offers cancel / logout / suspend / reboot / power-off.  System actions
    are performed through D-Bus (UPower / ConsoleKit) via `dbus-send`.
    """

    def disable_buttons(self):
        """Disable every button so an action cannot be triggered twice."""
        self.cancel.set_sensitive(False)
        self.logout.set_sensitive(False)
        self.suspend.set_sensitive(False)
        self.reboot.set_sensitive(False)
        self.shutdown.set_sensitive(False)

    def cancel_action(self, btn):
        """Close the dialog without doing anything."""
        self.disable_buttons()
        gtk.main_quit()

    def logout_action(self, btn):
        """Ask i3 to exit, ending the X session."""
        self.disable_buttons()
        self.status.set_label("Exiting i3, please standby...")
        os.system("i3-msg exit")

    def suspend_action(self, btn):
        """Lock the screen behind a fresh screenshot, then suspend via UPower."""
        self.disable_buttons()
        self.status.set_label("Suspending, please standby...")
        # NOTE(review): the lock-screen image path is hard-coded to one
        # user's home directory -- consider deriving it from $HOME.
        os.system("scrot \"/home/rationalash/Pictures/lockscreen.png\"")
        time.sleep(1)
        os.system("i3lock -i /home/rationalash/Pictures/lockscreen.png")
        time.sleep(1)
        os.system("dbus-send --system --print-reply \
                   --dest=\"org.freedesktop.UPower\" \
                   /org/freedesktop/UPower \
                   org.freedesktop.UPower.Suspend")
        gtk.main_quit()

    def reboot_action(self, btn):
        """Restart the machine via ConsoleKit."""
        self.disable_buttons()
        self.status.set_label("Rebooting, please standby...")
        os.system("dbus-send --system --print-reply \
                   --dest=\"org.freedesktop.ConsoleKit\" \
                   /org/freedesktop/ConsoleKit/Manager \
                   org.freedesktop.ConsoleKit.Manager.Restart")

    def shutdown_action(self, btn):
        """Power off the machine via ConsoleKit."""
        self.disable_buttons()
        self.status.set_label("Shutting down, please standby...")
        os.system("dbus-send --system --print-reply \
                   --dest=\"org.freedesktop.ConsoleKit\" \
                   /org/freedesktop/ConsoleKit/Manager \
                   org.freedesktop.ConsoleKit.Manager.Stop")

    def create_window(self):
        """Build the dialog window, its action buttons and the status label."""
        self.window = gtk.Window()
        title = "Log out " + getpass.getuser() + "? Choose an option:"
        self.window.set_title(title)
        self.window.set_border_width(5)
        self.window.set_size_request(500, 80)
        self.window.set_resizable(False)
        self.window.set_keep_above(True)
        # BUG FIX: was `self.window.stick` (attribute access without the
        # call), so the window was never made sticky across workspaces.
        self.window.stick()
        self.window.set_position(1)  # 1 == gtk.WIN_POS_CENTER
        self.window.connect("delete_event", gtk.main_quit)
        windowicon = self.window.render_icon(gtk.STOCK_QUIT, gtk.ICON_SIZE_MENU)
        self.window.set_icon(windowicon)
        # Row of action buttons.
        self.button_box = gtk.HBox()
        self.button_box.show()
        # Cancel button
        self.cancel = gtk.Button(stock=gtk.STOCK_CANCEL)
        self.cancel.set_border_width(4)
        self.cancel.connect("clicked", self.cancel_action)
        self.button_box.pack_start(self.cancel)
        self.cancel.show()
        # Logout button
        self.logout = gtk.Button("_Log out")
        self.logout.set_border_width(4)
        self.logout.connect("clicked", self.logout_action)
        self.button_box.pack_start(self.logout)
        self.logout.show()
        # Suspend button
        self.suspend = gtk.Button("_Suspend")
        self.suspend.set_border_width(4)
        self.suspend.connect("clicked", self.suspend_action)
        self.button_box.pack_start(self.suspend)
        self.suspend.show()
        # Reboot button (this stanza was garbled in the original source:
        # "self.reboot.s | et_border_widt | h(4)").
        self.reboot = gtk.Button("_Reboot")
        self.reboot.set_border_width(4)
        self.reboot.connect("clicked", self.reboot_action)
        self.button_box.pack_start(self.reboot)
        self.reboot.show()
        # Shutdown button
        self.shutdown = gtk.Button("_Power off")
        self.shutdown.set_border_width(4)
        self.shutdown.connect("clicked", self.shutdown_action)
        self.button_box.pack_start(self.shutdown)
        self.shutdown.show()
        # Row holding the status label.
        self.label_box = gtk.HBox()
        self.label_box.show()
        self.status = gtk.Label()
        self.status.show()
        self.label_box.pack_start(self.status)
        # Stack the buttons above the status label inside the window.
        self.vbox = gtk.VBox()
        self.vbox.pack_start(self.button_box)
        self.vbox.pack_start(self.label_box)
        self.vbox.show()
        self.window.add(self.vbox)
        self.window.show()

    def __init__(self):
        self.create_window()
def main():
    """Run the GTK main loop until one of the button handlers quits it."""
    gtk.main()


if __name__ == "__main__":
    # Build the dialog, then hand control over to GTK.
    go = i3_exit()
    main()
|
lpancescu/atlas-lint | setup.py | Python | mit | 3,767 | 0.000266 | # vim: set fileencoding=utf-8 :
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Absolute directory containing this setup.py; README.rst lives beside it.
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
name='endymion',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.4.1',
description='A small tool to check the link validity of external Vagrant boxes on Atlas',
long_description=long_description,
# The project's main homepage.
url='https://github.com/lpancescu/endymion',
# Author details
author='Laurențiu Păncescu',
author_email='laurentiu@laurentiupancescu.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Utilities',
# Pick your license as y | ou wish (should match "license" above) |
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='vagrant atlas',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=[]),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'endymion=endymion:main',
],
},
)
|
City-of-Helsinki/kerrokantasi | democracy/models/initial_data.py | Python | mit | 860 | 0.001168 | from democracy.enums import InitialSectionType
# Built-in section types seeded into the database (Finnish display names).
# BUG FIX: the first entry was garbled in the source
# ("InitialSectionType.MA | IN" and a stray trailing " |"); restored.
INITIAL_SECTION_TYPE_DATA = [
    {
        'identifier': InitialSectionType.MAIN,
        'name_singular': 'pääosio',
        'name_plural': 'pääosiot',
    },
    {
        'identifier': InitialSectionType.CLOSURE_INFO,
        'name_singular': 'sulkeutumistiedote',
        'name_plural': 'sulkeutumistiedotteet',
    },
    {
        'identifier': InitialSectionType.SCENARIO,
        'name_singular': 'vaihtoehto',
        'name_plural': 'vaihtoehdot',
    },
    {
        'identifier': InitialSectionType.PART,
        'name_singular': 'osa-alue',
        'name_plural': 'osa-alueet',
    },
]
def create_initial_section_types(section_type_model):
    """Idempotently seed the built-in section types into the database."""
    for section_data in INITIAL_SECTION_TYPE_DATA:
        section_type_model.objects.update_or_create(
            identifier=section_data['identifier'],
            defaults=section_data,
        )
|
lcpt/xc | verif/tests/materials/ec2/test_EC2Concrete.py | Python | gpl-3.0 | 5,205 | 0.074352 | # -*- coding: utf-8 -*-
from __future__ import division
from materials.ec2 import EC2_materials
import os
from miscUtils import LogMessages as lmsg
__author__= "Ana Ortega (AO_O) "
__copyright__= "Copyright 2015, AO_O"
__license__= "GPL"
__version__= "3.0"
__email__= "ana.ortega@ciccp.es "
fckDat=[12,16,20,25,30,35,40,45,50,55,60,70,80,90]
fcmCalc=[]
fctmCalc=[]
fctk005Calc=[]
fctk095Calc=[]
EcmCalc=[]
Epsc1Calc=[]
Epscu1Calc=[]
Epsc2Calc=[]
Epscu2Calc=[]
ExpNCalc=[]
Epsc3Calc=[]
Epscu3Calc=[]
for i in range(len(fckDat)):
name='C'+str(fckDat[i])
fck=-1*fckDat[i]*1e6 #[Pa][-]
concr= EC2_materials.EC2Concrete(name,fck,1.5)
fcm=concr.getFcm()/(-1e6)
fcmCalc.append(fcm)
fctm=round(concr.getFctm()/1e6,1)
fctmCalc.append(fctm)
fctk005=round(concr.getFctk005()/1e6,1)
fctk005Calc.append(fctk005)
fctk095=round(concr.getFctk095()/1e6,1)
fctk095Calc.append(fctk095)
concr.typeAggregate='Q'
Ecm=round(concr.getEcm()/1e9,0)
EcmCalc.append(Ecm)
Epsc1=round(concr.getEpsc1()*(-1e3),1)
Epsc1Calc.append(Epsc1)
Epscu1=round(concr.getEpscu1()*(-1e3),1)
Epscu1Calc.append(Epscu1)
Epsc2=round(concr.getEpsc2()*(-1e3),1)
Epsc2Calc.append(Epsc2)
Epscu2=round(concr.getEpscu2()*(-1e3),1)
Epscu2Calc.append(Epscu2)
ExpN=round(concr.getExpN(),1)
ExpNCalc.append(ExpN)
if concr.fckMPa()<=50:
Epsc3=round(concr.getEpsc3()*(-1e3),2)
else:
Epsc3=round(concr.getEpsc3()*(-1e3),1)
Epsc3Calc.append(Epsc3)
Epscu3=round(concr.getEpscu3()*(-1e3),1)
Epscu3Calc.append(Epscu3)
#Test Fcm
fcmDat=[20,24,28,33,38,43,48,53,58,63,68,78,88,98] #[MPa]
sqrErr= 0.0
for i in range(0,len(fcmDat)):
sqrErr+= (fcmDat[i]-fcmCalc[i])**2
fname= os.path.basename(__file__)+'-fcm'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Fctm
fctmDat=[1.6,1.9,2.2,2.6,2.9,3.2,3.5,3.8,4.1,4.2,4.4,4.6,4.8,5.0] #[MPa]
sqrErr= 0.0
for i in range(0,len(fctmDat)):
sqrErr+= (fctmDat[i]-fctmCalc[i])**2
fname= os.path.basename(__file__)+'-fctm'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Fctk005
fctk005Dat=[1.1,1.3,1.5,1.8,2.0,2.2,2.5,2.7,2.9,3.0,3.0,3.2,3.4,3.5] #[MPa]
sqrErr= 0.0
for i in range(0,len(fctk005Dat)):
sqrErr+= (fctk005Dat[i]-fctk005Calc[i])**2
fname= os.path.basename(__file__)+'-fctk005'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Fctk095
fctk095Dat=[2.0,2.5,2.9,3.3,3.8,4.2,4.6,4.9,5.3,5.5,5.7,6.0,6.3,6.6] #[MPa]
sqrErr= 0.0
for | i in range(0,len(fctk095Dat)):
sqrErr+= (fctk095Dat[i]-fctk095Calc[i])**2
fname= os.path.basename(__file__)+'-fctk095'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Ecm
EcmDat=[27,29,30,31,33,34,35,36,37,38,39,41,42,44] #[GPa]
sqrErr= 0.0
for i in range(0,len(EcmDat)):
sqrErr+= (EcmDat[i]-EcmCalc[i])**2
fname= os.path.basename(__file__)+'-Ecm'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR. | ')
#Test Epsc1
Epsc1Dat=[1.8,1.9,2.0,2.1,2.2,2.2,2.3,2.4,2.5,2.5,2.6,2.7,2.8,2.8] #[per thousand]
sqrErr= 0.0
for i in range(0,len(Epsc1Dat)):
sqrErr+= (Epsc1Dat[i]-Epsc1Calc[i])**2
fname= os.path.basename(__file__)+'-Epsc1'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Epscu1
Epscu1Dat=[3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.2,3.0,2.8,2.8,2.8] #[per thousand]
sqrErr= 0.0
for i in range(0,len(Epscu1Dat)):
sqrErr+= (Epscu1Dat[i]-Epscu1Calc[i])**2
fname= os.path.basename(__file__)+'-Epscu1'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Epsc2
Epsc2Dat=[2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.2,2.3,2.4,2.5,2.6] #[per thousand]
sqrErr= 0.0
for i in range(0,len(Epsc2Dat)):
sqrErr+= (Epsc2Dat[i]-Epsc2Calc[i])**2
fname= os.path.basename(__file__)+'-Epsc2'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Epscu2
Epscu2Dat=[3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.1,2.9,2.7,2.6,2.6] #[per thousand]
sqrErr= 0.0
for i in range(0,len(Epscu2Dat)):
sqrErr+= (Epscu2Dat[i]-Epscu2Calc[i])**2
fname= os.path.basename(__file__)+'-Epscu2'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test ExpN
ExpNDat=[2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,1.8,1.6,1.4,1.4,1.4]
sqrErr= 0.0
for i in range(0,len(ExpNDat)):
sqrErr+= (ExpNDat[i]-ExpNCalc[i])**2
fname= os.path.basename(__file__)+'-ExpN'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Epsc3
Epsc3Dat=[1.75,1.75,1.75,1.75,1.75,1.75,1.75,1.75,1.75,1.8,1.9,2.0,2.2,2.3] #[per thousand]
sqrErr= 0.0
for i in range(0,len(Epsc3Dat)):
sqrErr+= (Epsc3Dat[i]-Epsc3Calc[i])**2
fname= os.path.basename(__file__)+'-Epsc3'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
#Test Epscu3
Epscu3Dat=[3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.5,3.1,2.9,2.7,2.6,2.6] #[per thousand]
sqrErr= 0.0
for i in range(0,len(Epscu3Dat)):
sqrErr+= (Epscu3Dat[i]-Epscu3Calc[i])**2
fname= os.path.basename(__file__)+'-Epscu3'
if sqrErr<1e-8:
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
|
fbradyirl/home-assistant | homeassistant/components/media_player/const.py | Python | apache-2.0 | 1,935 | 0 | """Proides the constants needed for component."""
ATTR_APP_ID = "app_id"
ATTR_APP_NAME = "app_name"
ATTR_INPUT_SOURCE = "source"
ATTR_INPUT_SOURCE_LIST = "source_list"
ATTR_MEDIA_ALBUM_ARTIST = "media_album_artist"
ATTR_MEDIA_ALBUM_NAME = "media_album_name"
ATTR_MEDIA_ARTIST = "media_artist"
ATTR_MEDIA_CHANNEL = "media_channel"
ATTR_MEDIA_CONTENT_ID = "media_content_id"
ATTR_MEDIA_CONTENT_TYPE = "media_content_type"
ATTR_MEDIA_DURATION = "media_duration"
ATTR_MEDIA_ENQUEUE = "enqueue"
ATTR_MEDIA_EPISODE = "media_episode"
ATTR_MEDIA_PLAYLIST = "media_playlist"
ATTR_MEDIA_POSITION = "media_position"
ATTR_MEDIA_POSITION_UPDATED_AT = "media_position_updated_at"
ATTR_MEDIA_SEASON = "media_season"
ATTR_MEDIA_SEEK_POSITION = "seek_positio | n"
ATTR_MEDIA_SERIES_TITLE = "media_series_title"
ATTR_MEDIA_SHUFFLE = "shuffle"
ATTR_MEDIA_TITLE = "media_title"
ATTR_MEDIA_TRACK = "media_track"
ATTR_MEDIA_VOLUME_LEVEL = "volume_level"
ATTR_MEDIA_VOLUME_MUTED = "is_volume_muted"
ATTR_SOUND_MODE = "sound_mode"
ATTR_SOUND_MODE_LIST = "sound_mode_list"
DOMAIN = "media_player"
MEDIA_TYPE_MUSIC = "music"
MEDIA_TYPE_TVSHOW = "tvshow"
MEDIA_TYPE_MOVIE = "movie"
MEDIA_TYPE_VIDEO = "video"
MEDIA_TYPE_EPISODE = "episode"
MEDIA | _TYPE_CHANNEL = "channel"
MEDIA_TYPE_PLAYLIST = "playlist"
MEDIA_TYPE_IMAGE = "image"
MEDIA_TYPE_URL = "url"
MEDIA_TYPE_GAME = "game"
MEDIA_TYPE_APP = "app"
SERVICE_CLEAR_PLAYLIST = "clear_playlist"
SERVICE_PLAY_MEDIA = "play_media"
SERVICE_SELECT_SOUND_MODE = "select_sound_mode"
SERVICE_SELECT_SOURCE = "select_source"
SUPPORT_PAUSE = 1
SUPPORT_SEEK = 2
SUPPORT_VOLUME_SET = 4
SUPPORT_VOLUME_MUTE = 8
SUPPORT_PREVIOUS_TRACK = 16
SUPPORT_NEXT_TRACK = 32
SUPPORT_TURN_ON = 128
SUPPORT_TURN_OFF = 256
SUPPORT_PLAY_MEDIA = 512
SUPPORT_VOLUME_STEP = 1024
SUPPORT_SELECT_SOURCE = 2048
SUPPORT_STOP = 4096
SUPPORT_CLEAR_PLAYLIST = 8192
SUPPORT_PLAY = 16384
SUPPORT_SHUFFLE_SET = 32768
SUPPORT_SELECT_SOUND_MODE = 65536
|
i1ikey0u/watchwxfiles | watchdog_of_wxgzh.py | Python | gpl-3.0 | 6,141 | 0.02833 | #!/usr/bin/env python
# coding:utf-8
# environ: python3.6 32bit
# code by shuichon
'''
V3.2,优化了执行流程和操作的简易性,修补了几个bug
'''
from urllib import request, parse
from bs4 import BeautifulSoup
import re, json, sys
def get_warn(soup):
    """Return the number of CAPTCHA-warning paragraphs on a Sogou page.

    Sogou inserts ``<p class="ip-time-p">`` elements when it demands
    secondary verification after too-frequent requests; a non-zero count
    therefore means we are being rate-limited.
    """
    return len(soup.find_all('p', class_='ip-time-p'))
def get_gzh_lists(keys, gzh_num=1):
    """Search Sogou Weixin for official accounts matching *keys*.

    Without logging in to Sogou at most 10 result pages (100 accounts) are
    reachable.  *gzh_num* is how many account URLs to collect; returns a
    list of account page URLs (possibly shorter if we hit a CAPTCHA or run
    out of results).
    """
    gzh_lists = []
    page = 1
    while gzh_num > 0:
        pages_url = ("http://weixin.sogou.com/weixin?query="
                     + parse.quote(keys) + "&type=1&page=%s" % page)
        print("当前搜索URL为:", pages_url)
        cots = request.urlopen(pages_url).read().decode('utf-8')
        gzhlist_soup = BeautifulSoup(cots, "html.parser")
        if get_warn(gzhlist_soup) > 0:
            print('发送请求过于频繁,存在二次验证,请手工打开搜狗微信搜索通过验证!')
            break
        gzhbox2 = gzhlist_soup.find_all('div', class_="gzh-box2")
        if not gzhbox2:
            # BUG FIX: an empty result page previously made the loop spin
            # forever (num stayed 0 and gzh_num never decreased).
            break
        # BUG FIX: the original only assigned `num` for strict </> compares,
        # leaving it unbound (NameError) when len(gzhbox2) == gzh_num.
        num = min(len(gzhbox2), gzh_num)
        print("从当前页获取 %i 个公众号。" % num)
        for n in range(0, num):
            uni_gzh = gzhbox2[n].find('a').attrs['href']
            gzh_lists.append(uni_gzh)
        gzh_num -= num
        page += 1
    return gzh_lists
def get_gzh_content_top10(gzh_url):
    """Return the URLs of the ~10 most recent articles of one account.

    *gzh_url* is the account page URL; article URLs are extracted from the
    ``var msgList = {...}`` JavaScript blob embedded in the page.  (Keyword
    filtering of article bodies is done separately by grep_gzh().)
    """
    gzh_wz_l = []
    infos = request.urlopen(gzh_url).read().decode('utf-8')
    infos_soup = BeautifulSoup(infos, "html.parser")
    if get_warn(infos_soup) > 0:
        print('发送请求过于频繁,存在二次验证,请手工打开搜狗微信搜索通过验证!')
    else:
        # Locate the <script> element that assigns the article list.
        json2 = infos_soup.find('script', type="text/javascript", text=re.compile("var msgList*"))
        splitjson = json2.text.split('\r\n')
        # NOTE(review): assumes the assignment always sits on line index 8
        # of the script and ends with ';' -- brittle against page changes.
        jsoninfo = splitjson[8].replace(" var msgList = ", '')
        jsonData = json.loads(jsoninfo[0:-1])
        for l in range(len(jsonData['list'])):
            inf_u = jsonData['list'][l]['app_msg_ext_info']['content_url']
            # Undo HTML escaping, then make the relative URL absolute.
            inf_u = inf_u.replace("&amp;", "&")
            inf_u = "http://mp.weixin.qq.com" + inf_u
            gzh_wz_l.append(inf_u)
    return gzh_wz_l
def grep_gzh(url, key):
    """Fetch *url* and report whether its body matches the regex *key*."""
    page_text = request.urlopen(url).read().decode('utf-8')
    hits = re.findall(key, page_text)
    if hits:
        print("发现 %i 处匹配的关键字" % len(hits) + '\n')
        print("公众号文章临时访问地址为:" + url + '\n')
    else:
        print("未发现存在匹配的关键字")
if __name__ == "__main__":
    print("f*ck wechat !")
    version = "version v3.2 by shuichon @ 2017年8月28日"
    if len(sys.argv) > 1:
        if sys.argv[1].startswith('-'):
            option = sys.argv[1][1:]
            if option == 'v':
                print(version)
            elif option == 'h':
                # Usage text, kept verbatim from the original source.
                print('''
=================================================================
| 使用说明: |
| 参数说明: |
| -v : 当前版本 |
| -h : 显示本帮助说明 |
| -gzh : 查找指定关键字的公众号,默认返回第一个 |
| -gnum : 可选参数,从结果中取gnum个公众号,默认1 |
| -key : 可选参数,搜索1个公众号,返回近10篇文章中,包含key的文章URL |
| -kws : 内容识别关键字,查找包含kws的公众号文章, |
| -gnum : 可选参数,在结果中取gnum个公众号,默认1 |
=================================================================
''')
            # BUG FIX: the original had a second, unreachable
            # `elif option == 'v':` branch here; removed as dead code.
            elif option == 'gzh':
                print('当前参数数量为:', len(sys.argv[:]), sys.argv[:])
                # BUG FIX: this message was garbled in the source
                # ('相关 | 的公众号'); restored.
                print('搜索和"%s"相关的公众号' % sys.argv[2])
                if (len(sys.argv[:]) > 3) and (sys.argv[3][1:] == 'gnum'):
                    option2 = sys.argv[3][1:]
                    print('获取%i个微信公众号进行内容识别' % int(sys.argv[4]))
                    gzh_url_l = get_gzh_lists(keys=sys.argv[2], gzh_num=int(sys.argv[4]))
                    for l in gzh_url_l:
                        print("当前公众号访问URL:")
                        print(l)
                        print("当前公众号最近10篇公众号文章URL:")
                        for wz in get_gzh_content_top10(l):
                            print(wz)
                elif (len(sys.argv[:]) > 3) and (sys.argv[3][1:] == 'key'):
                    option2 = sys.argv[3][1:]
                    print('使用 "%s" 关键字搜索微信公众号' % sys.argv[2])
                    gzh_url_l = get_gzh_lists(keys=sys.argv[2])
                    for l in gzh_url_l:
                        print(l)
                        for wz in get_gzh_content_top10(l):
                            grep_gzh(wz, key=sys.argv[4])
                else:
                    print(get_gzh_lists(keys=sys.argv[2]))
            elif option == 'kws':
                option2 = sys.argv[2]
                print('公众号内容识别关键字为:' + option2)
                if len(sys.argv[:]) == 3:
                    print('默认获取1个微信公众号')
                    gzh_url_l = get_gzh_lists(keys=sys.argv[2])
                    print(gzh_url_l)
                elif (len(sys.argv[:]) > 3) and (sys.argv[3][1:] == 'gnum'):
                    option3 = sys.argv[3]
                    print('获取%i个微信公众号' % int(sys.argv[4]))
                    gzh_url_l = get_gzh_lists(keys=sys.argv[2], gzh_num=int(sys.argv[4]))
                    print(gzh_url_l)
                # NOTE(review): if neither branch above ran (e.g. a third
                # argument other than -gnum), gzh_url_l is unbound here and
                # this loop raises NameError -- confirm intended usage.
                for l in gzh_url_l:
                    print("当前公众号最近10篇公众号文章URL:")
                    for wz in get_gzh_content_top10(l):
                        print(wz)
            else:
                print('''Unknown option !
please input '-h' behind the script.''')
        else:
            print('''Unknown option !
please input '-h' behind the script.''')
    else:
        print('Nothing done!')
|
badracket/django-brdesigner | brdesigner/admin.py | Python | gpl-2.0 | 2,044 | 0.029843 | from django import forms
from django.contrib import admin
from myproject.brdesigner.model | s import *
from django.utils.safestring import mark_safe
class BadRacketAdminBase(admin.ModelAdmin):
    """Shared admin base: loads the list-reorder and TinyMCE assets.

    BUG FIX: the TinyMCE entry was garbled in the source
    ("' | brdesigner/js/tinymce/tinymce.min.js'"); restored.
    """
    class Media:
        js = (
            'brdesigner/js/admin_list_reorder.js',
            'brdesigner/js/tinymce/tinymce.min.js',
            'brdesigner/js/textareas.js',
        )
class SortablePageAdmin(BadRacketAdminBase):
    """Page admin with in-list editing and drag-reordering.

    NOTE: the original module defined this class twice, identically; the
    second (dead) definition has been removed.
    """
    list_display = ["id","name","title","url_pattern","page_type","is_active","display_order"]
    list_display_links = ["id"]
    list_editable = ["name","title","url_pattern","page_type","is_active","display_order"]


class SortableMenuItemAdmin(BadRacketAdminBase):
    """Menu-item admin with in-list editing and drag-reordering."""
    list_display = ["id","display_name","target_page","external_link","is_active","rel","target","display_order"]
    list_display_links = ["id"]
    list_editable = ["display_name","target_page","external_link","is_active","rel","target","display_order"]
class SortableJsFileLoadAdmin(BadRacketAdminBase):
    # Admin for JS file includes; rows are editable in the change list and
    # orderable via display_order.
    list_display = ["id","path","is_local","is_active","display_order"]
    list_display_links = ["id"]
    list_editable = ["path","is_local","is_active","display_order"]


class SortableCssFileLoadAdmin(BadRacketAdminBase):
    # Admin for CSS file includes; identical layout to the JS admin.
    list_display = ["id","path","is_local","is_active","display_order"]
    list_display_links = ["id"]
    list_editable = ["path","is_local","is_active","display_order"]
# Register every model with the Django admin, attaching the sortable
# ModelAdmin subclasses where in-list reordering is needed.
admin.site.register(PageType)
admin.site.register(Page, SortablePageAdmin)
admin.site.register(MenuItem, SortableMenuItemAdmin)
admin.site.register(BrandImages)
admin.site.register(JsFileLoad, SortableJsFileLoadAdmin)
admin.site.register(CssFileLoad, SortableCssFileLoadAdmin)
admin.site.register(CssSelector)
admin.site.register(CssSetting)
admin.site.register(GoogleAnalytics)
jorik041/Network | dht/node.py | Python | mit | 3,684 | 0.000814 | """
Copyright (c) 2014 Brian Muller
Copyright (c) 2015 OpenBazaar
"""
import heapq
from operator import itemgetter
from protos import objects
class Node:
    """A DHT peer: a guid plus optional network address, signed public key
    and vendor flag.

    Reconstructed: stray " | " extraction artifacts garbled two lines of the
    original text (`self.port = port` and `self.signed_pubkey`).
    """
    def __init__(self, id, ip=None, port=None, signed_pubkey=None,
                 vendor=False):
        self.id = id
        self.ip = ip
        self.port = port
        self.signed_pubkey = signed_pubkey
        self.vendor = vendor
        # NOTE: `long` and str.encode('hex') are Python 2 only -- this module
        # predates Python 3. long_id is the guid as a big integer, used for
        # XOR distance.
        self.long_id = long(id.encode('hex'), 16)

    def getProto(self):
        """Serialize this node into a protobuf objects.Node message."""
        n = objects.Node()
        n.guid = self.id
        n.signedPublicKey = self.signed_pubkey
        n.vendor = self.vendor
        if self.ip is not None: n.ip = self.ip
        if self.port is not None: n.port = self.port
        return n

    def sameHomeAs(self, node):
        """True when `node` advertises the same ip/port pair as this one."""
        return self.ip == node.ip and self.port == node.port

    def distanceTo(self, node):
        """
        Get the distance between this node and another (Kademlia XOR metric).
        """
        return self.long_id ^ node.long_id

    def __iter__(self):
        """
        Enables use of Node as a tuple - i.e., tuple(node) works.
        """
        return iter([self.id, self.ip, self.port])

    def __repr__(self):
        return repr([self.long_id, self.ip, self.port])

    def __str__(self):
        return "%s:%s" % (self.ip, str(self.port))
class NodeHeap(object):
"""
A heap of nodes ordered by distance to a given node.
"""
def __init__(self, node, maxsize):
"""
Constructor.
@param node: The node to measure all distnaces from.
@param maxsize: The maximum size that this heap can grow to.
"""
self.node = node
self.heap = []
self.contacted = set()
self.maxsize = maxsize
def remove(self, peerIDs):
"""
Remove a list of peer ids from this heap. Note that while this
heap retains a constant visible size (based on the iterator), it's
actual size may be quite a bit larger than what's exposed. Therefore,
removal of nodes may not change the visible size as previously added
nodes suddenly become visible.
"""
peerIDs = set(peerIDs)
if len(peerIDs) == 0:
return
nheap = []
for distance, node in self.heap:
if node.id not in peerIDs:
heapq.heappush(nheap, (distance, node))
self.heap = nheap
def getNodeById(self, id):
for _, node in self.heap:
if node.id == id:
return node
return None
def allBeenContacted(self):
return len(self.getUncontacted()) == 0
def getIDs(self):
return [n.id for n in self]
def markContacted(self, node):
self.contacted.add(node.id)
def popleft(self):
if len(self) > 0:
return heapq.heappop(self.heap)[1]
return None
def push(self, nodes):
"""
Push nodes onto heap.
@param nodes: This can be a single item or a C{list}.
"""
if not isinstance(nodes, list):
nodes = [nodes]
for node in nodes:
if node not in self:
distance = self.node.distanceTo(node)
heapq.heappush(self.heap, (distance, node))
def __len__(self):
return min(len(self.heap), self.maxsize)
def __iter__(self):
nodes = heapq.nsmallest(self.maxsize, self.heap)
return iter(map(itemgetter(1), nodes))
def __contains__(self, node):
for distance, n in self.heap:
if node.id == n.id:
return True
return False
def getUncontacted(self):
return [n for n in self if n.id not in self.contacted]
|
dabillox/kcl-globalgasflaring | src/tests/test_ggf.py | Python | mit | 7,690 | 0.00078 | import unittest
import pandas as pd
import numpy as np
import glob
import epr
import src.utils as utils
from src.ggf.detectors import SLSDetector, ATXDetector
class MyTestCase(unittest.TestCase):
    """Regression tests for the SLS and ATX hotspot detectors.

    Each test runs a detector over a bundled test scene and compares the
    result against a frozen target array/CSV under ../../data/test_data.

    Reconstructed: stray " | " extraction artifacts garbled two lines of
    test_detect_hotspots_sls (`glob.glob` and the target .npy path).
    """
    # -----------------
    # unit tests
    # -----------------
    def test_szn_interpolation(self):
        path_to_data = glob.glob("../../data/test_data/S3A*.zip")[0]
        path_to_target = "../../data/test_data/sls_szn.npy"
        path_to_temp = "../../data/temp/"
        target = np.load(path_to_target)
        product = utils.extract_zip(path_to_data, path_to_temp)
        HotspotDetector = SLSDetector(product)
        HotspotDetector.run_detector()
        self.assertEqual(True, (target == HotspotDetector.sza).all())
    def test_night_mask_sls(self):
        path_to_data = glob.glob("../../data/test_data/S3A*.zip")[0]
        path_to_target = "../../data/test_data/sls_nightmask.npy"
        path_to_temp = "../../data/temp/"
        target = np.load(path_to_target)
        product = utils.extract_zip(path_to_data, path_to_temp)
        HotspotDetector = SLSDetector(product)
        HotspotDetector.run_detector()
        self.assertEqual(True, (target == HotspotDetector.night_mask).all())
    def test_night_mask_atx(self):
        path_to_data = glob.glob("../../data/test_data/*.N1")[0]
        path_to_target = "../../data/test_data/atx_nightmask.npy"
        target = np.load(path_to_target)
        target_mean = np.mean(target)
        product = epr.Product(path_to_data)
        HotspotDetector = ATXDetector(product)
        HotspotDetector.run_detector()
        self.assertAlmostEqual(target_mean, np.mean(HotspotDetector.night_mask))
    def test_vza_interpolation(self):
        path_to_data = glob.glob("../../data/test_data/S3A*.zip")[0]
        path_to_target = "../../data/test_data/sls_vza.npy"
        path_to_temp = "../../data/temp/"
        target = np.load(path_to_target)
        product = utils.extract_zip(path_to_data, path_to_temp)
        HotspotDetector = SLSDetector(product)
        HotspotDetector.run_detector()
        self.assertEqual(True, (target == HotspotDetector.vza).all())
    def test_vza_mask(self):
        path_to_data = glob.glob("../../data/test_data/S3A*.zip")[0]
        path_to_target = "../../data/test_data/sls_vza_mask.npy"
        path_to_temp = "../../data/temp/"
        target = np.load(path_to_target)
        product = utils.extract_zip(path_to_data, path_to_temp)
        HotspotDetector = SLSDetector(product)
        HotspotDetector.run_detector()
        self.assertEqual(True, (target == HotspotDetector.vza_mask).all())
    def test_detect_hotspots_sls(self):
        path_to_data = glob.glob("../../data/test_data/S3A*.zip")[0]
        path_to_target = "../../data/test_data/sls_detect_hotspots.npy"
        path_to_temp = "../../data/temp/"
        target = np.load(path_to_target)
        product = utils.extract_zip(path_to_data, path_to_temp)
        HotspotDetector = SLSDetector(product)
        HotspotDetector.run_detector()
        self.assertEqual(True, (target == HotspotDetector.hotspots).all())
    def test_detect_hotspots_atx(self):
        path_to_data = glob.glob("../../data/test_data/*.N1")[0]
        path_to_target = "../../data/test_data/atx_detect_hotspots.npy"
        target = np.load(path_to_target)
        product = epr.Product(path_to_data)
        HotspotDetector = ATXDetector(product)
        HotspotDetector.run_detector()
        self.assertEqual(True, (target == HotspotDetector.hotspots).all())
    def test_cloud_free_atx(self):
        path_to_data = glob.glob("../../data/test_data/*.N1")[0]
        path_to_target = "../../data/test_data/atx_cloud_mask.npy"
        target = np.load(path_to_target)
        product = epr.Product(path_to_data)
        HotspotDetector = ATXDetector(product)
        HotspotDetector.run_detector()
        self.assertEqual(True, (target == HotspotDetector.cloud_free).all())
    def test_get_arcmin_int(self):
        coords = np.array([-150.53434, -100.13425, -50.20493, 0.34982, 50.43562, 100.12343, 150.56443])
        target = np.array([-15032, -10008, -5012, 21, 5026, 10007, 15034])
        path_to_data = glob.glob("../../data/test_data/*.N1")[0]
        product = epr.Product(path_to_data)
        HotspotDetector = ATXDetector(product)
        result = HotspotDetector._find_arcmin_gridcell(coords)
        self.assertEqual(True, (target == result).all())
    def test_radiance_from_reflectance(self):
        path_to_target = "../../data/test_data/atx_radiance_from_reflectance.npy"
        target = np.load(path_to_target)
        path_to_data = glob.glob("../../data/test_data/*.N1")[0]
        product = epr.Product(path_to_data)
        HotspotDetector = ATXDetector(product)
        reflectance = product.get_band('reflec_nadir_1600').read_as_array()
        result = HotspotDetector._rad_from_ref(reflectance)
        self.assertEqual(True, (target == result).all())
    def test_radiance_from_BT(self):
        path_to_data = glob.glob("../../data/test_data/*.N1")[0]
        product = epr.Product(path_to_data)
        HotspotDetector = ATXDetector(product)
        brightness_temp = 1500
        wavelength = 1.6
        result = HotspotDetector._rad_from_BT(wavelength, brightness_temp)
        target = 28200.577465487077
        self.assertAlmostEqual(target, result)
    def test_sun_earth_distance(self):
        path_to_data = glob.glob("../../data/test_data/*.N1")[0]
        product = epr.Product(path_to_data)
        HotspotDetector = ATXDetector(product)
        target = 0.9877038273760421
        result = HotspotDetector._compute_sun_earth_distance()
        self.assertAlmostEqual(target, result)
    def test_compute_frp(self):
        path_to_data = glob.glob("../../data/test_data/*.N1")[0]
        product = epr.Product(path_to_data)
        HotspotDetector = ATXDetector(product)
        HotspotDetector.run_detector(flares_or_sampling=True)
        path_to_target = "../../data/test_data/atx_frp.npy"
        target = np.load(path_to_target)
        result = HotspotDetector.frp
        self.assertEqual(True, (target == result).all())
    # -----------------
    # functional tests
    # -----------------
    def test_run_atx(self):
        target = pd.read_csv(glob.glob("../../data/test_data/ATS*.csv")[0])
        path_to_data = glob.glob("../../data/test_data/*.N1")[0]
        product = epr.Product(path_to_data)
        HotspotDetector = ATXDetector(product)
        HotspotDetector.run_detector()
        result = HotspotDetector.to_dataframe(keys=['latitude', 'longitude'])
        # TODO determine why floating point errors are causing issues in testing here
        target = target.astype(int)
        result = result.astype(int)
        are_equal = target.equals(result)
        self.assertEqual(True, are_equal)
    def test_run_sls(self):
        # setup
        target = pd.read_csv(glob.glob("../../data/test_data/S3A*.csv")[0])
        path_to_data = glob.glob("../../data/test_data/S3A*.zip")[0]
        path_to_temp = "../../data/temp/"
        product = utils.extract_zip(path_to_data, path_to_temp)
        HotspotDetector = SLSDetector(product)
        HotspotDetector.run_detector()
        result = HotspotDetector.to_dataframe(keys=['latitude', 'longitude', 'sza', 'vza', 'swir_16', 'swir_22'])
        # TODO determine why floating point errors are causing issues in testing here
        target = target.astype(int)
        result = result.astype(int)
        # compare
        are_equal = target.equals(result)
        self.assertEqual(True, are_equal)
# Allow running this file directly with `python test_ggf.py`.
if __name__ == '__main__':
    unittest.main()
|
NarlikarLab/DIVERSITY | weblogoMod/corebio/db/astral.py | Python | gpl-3.0 | 12,519 | 0.011662 |
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# Modifications Copyright 2004/2005 James Casbon.
# Copyright 2005 by Regents of the University of California. All rights reserved
# (Major rewrite for conformance to corebio. Gavin Crooks)
#
# This code is derived from the Biopython distribution and is governed by it's
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""ASTRAL: Compendium for Sequence and Structure Analysis.
The ASTRAL compendium provides databases and tools useful for analyzing protein structures and their sequences. It is partially derived from, and augments the SCOP: Structural Classification of Proteins database. Most of the resources depend upon the coordinate files maintained and distributed by the Protein Data Bank.
Ref:
http://astral.berkeley.edu/
* Classes :
- Raf -- A file of ASTRAL RAF (Rapid Access Format) Sequence Maps.
- RafSeqMap -- A sequence map, a RAF record.
- Res -- A single residue mapping from a RAF record.
* Functions :
- parse_domain -- Convert an ASTRAL fasta header string into a Scop domain.
- normalize_letters -- Normalize RAF amino acid codes.
"""
import re
from copy import copy
from corebio.db.scop import Domain, Residues
from corebio.data import extended_three_to_one as to_one_letter_code
from corebio.utils import FileIndex
__all__ = ('astral_evalues', 'astral_percent_identities',
           'astral_evalues_filenames', 'normalize_letters', 'parse_domain',
           'Raf', 'RafSeqMap', 'Res')
# NOTE(review): 'Res' is exported above but not defined in this chunk --
# confirm it is defined later in the module.
# Percentage identity filtered ASTRAL SCOP genetic domain sequence subset
astral_percent_identities = [10,20,25,30,35,40,50,70,90,95,100]
# E-value filtered ASTRAL SCOP genetic domain sequence subsets, based on PDB SEQRES records.
astral_evalues = [10, 5, 1, 0.5, 0.1, 0.05, 0.01, 0.005, 0.001, 1e-4, 1e-5, 1e-10, 1e-15,1e-20, 1e-25, 1e-50]
# A map between evalues and astral filename suffixes.
astral_evalues_filenames = {
    10: 'e+1', 5: 'e+0,7', 1: 'e+0', 0.5: 'e-0,3', 0.1: 'e-1',
    0.05: 'e-1,3', 0.01: 'e-2', 0.005: 'e-2,3', 0.001: 'e-3',
    1e-4: 'e-4', 1e-5: 'e-5', 1e-10: 'e-10', 1e-15: 'e-15',
    1e-20: 'e-20', 1e-25: 'e-25', 1e-50: 'e-50' }
def normalize_letters(one_letter_code):
    """Convert RAF one-letter amino acid codes into IUPAC standard codes.

    Letters are uppercased, and "." ("Unknown") is converted to "X".
    (Reconstructed: a stray " |" extraction artifact trailed the final line
    of the original text.)
    """
    if one_letter_code == '.':
        return 'X'
    return one_letter_code.upper()
_domain_re = re.compile(r">?([\w_\.]*)\s+([\w\.]*)\s+\(([^)]*)\) (.*)")
def parse_domain(str) :
    """Convert an ASTRAL fasta header string into a SCOP domain.

    An ASTRAL (http://astral.stanford.edu/) header contains a concise
    description of a SCOP domain. A very similar format is used when a
    Domain object is converted into a string. The Domain returned by this
    method contains most of the SCOP information, but it will not be located
    within the SCOP hierarchy (i.e. the parent node will be None). The
    description is composed of the SCOP protein and species descriptions.

    A typical ASTRAL header looks like --
    >d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}
    """
    match = _domain_re.match(str)
    if match is None:
        raise ValueError("Domain: " + str)
    dom = Domain()
    dom.sid = match.group(1)
    dom.sccs = match.group(2)
    dom.residues = Residues(match.group(3))
    # Fall back to the PDB id embedded in the sid (characters 2-5).
    if not dom.residues.pdbid:
        dom.residues.pdbid = dom.sid[1:5]
    dom.description = match.group(4).strip()
    return dom
class Raf(FileIndex) :
    """ASTRAL RAF (Rapid Access Format) Sequence Maps.
    The ASTRAL RAF Sequence Maps record the relationship between the PDB SEQRES
    records (representing the sequence of the molecule used in an experiment)
    and the ATOM records (representing the atoms experimentally observed).
    This data is derived from the Protein Data Bank CIF files. Known errors in
    the CIF files are corrected manually, with the original PDB file serving as
    the final arbiter in case of discrepancies.
    Residues are referenced by residue ID. This consists of the PDB residue
    sequence number (up to 4 digits) and an optional PDB insertion code (an
    ascii alphabetic character, a-z, A-Z). e.g. "1", "10A", "1010b", "-1"
    See "ASTRAL RAF Sequence Maps":http://astral.stanford.edu/raf.html
    The RAF file itself is about 50 MB. Each line consists of a sequence map of
    a different protein chain. This index provides rapid, random
    access of RAF records without having to load the entire file into memory.
    This class does not load the entire RAF file into memory. Instead, it
    reads the file once, noting the location and content of each RafSeqMap.
    The index key is a concatenation of the PDB ID and chain ID. e.g
    "2drcA", "155c_". RAF uses an underscore to indicate blank
    chain IDs. Custom maps of subsequences or spanning multiple chains can
    be constructed with the get_seqmap method.
    """
    def __init__(self, raf_file) :
        # linekey: the first five characters (4-char PDB id + 1-char chain id)
        # index each RAF data line; blank/comment lines map to None so
        # FileIndex skips them.
        def linekey(line) :
            if not line or len(line)<5 or line.isspace() or line[0]=='#':
                return None
            return line[0:5]
        # Each indexed record is parsed lazily into a RafSeqMap on access.
        def parser( f) : return RafSeqMap(f.readline())
        FileIndex.__init__(self, raf_file, linekey, parser)
    def get_seqmap(self, residues) :
        """Get the sequence map for a collection of residues.
        residues -- A SCOP style description of a collection of residues from a
                    PDB strucure, (e.g. '(1bba A:10-20,B:)'), as a string or a
                    scop.Residues instance.
        """
        if type(residues)== str :
            residues = Residues(residues)
        pdbid = residues.pdbid
        frags = residues.fragments
        if not frags: frags =(('_','',''),) # All residues of unnamed chain
        # Concatenate the per-chain fragment maps into a single RafSeqMap.
        seqMap = None
        for frag in frags :
            chainid = frag[0]
            # RAF uses '_' for a blank chain id; normalize the variants.
            if chainid=='' or chainid=='-' or chainid==' ' or chainid=='_':
                chainid = '_'
            sid = pdbid + chainid
            sm = self[sid]
            # Cut out fragment of interest
            start = 0
            end = len(sm.res)
            if frag[1] : start = int(sm.index(frag[1], chainid))
            if frag[2] : end = int(sm.index(frag[2], chainid)+1)
            sm = sm[start:end]
            if seqMap is None :
                seqMap = sm
            else :
                seqMap += sm
        return seqMap
# End Raf
class RafSeqMap(object) :
"""ASTRAL RAF (Rapid Access Format) Sequence Maps.
RafSeqMap is a list like object; you can find the location of particular
residues with index(), slice this RafSeqMap into fragments, and glue
fragments back together with extend().
- pdbid -- The PDB 4 character ID
- pdb_datestamp -- From the PDB file
- version -- The RAF format version. e.g. 0.01
- flags -- RAF flags. (See release notes for more information.)
- res -- A list of Res objects, one for each residue in this sequence map
"""
def __init__(self, raf_record=None) :
"""Parses a RAF record into a RafSeqMap object."""
self.pdbid = ''
self.pdb_datestamp = ''
self.version = ''
self.flags = ''
self.res = []
if not raf_record : return
header_len = 38
line = raf_record.rstrip() # no trailing whitespace
if len(line)<header_len:
raise ValueError("Incomplete header: "+line)
self.pdbid = line[0:4]
chainid = line[4:5]
self.version = line[6:10]
# Raf format versions 0.01 and 0.02 are identical for practical purposes
if(self.version != "0.01" and self.version !="0.02") :
raise ValueError("Incompatible RAF version: "+self.version)
self.pdb_datestamp = line[14:20]
self.flags = line[21:27]
for i in range(header_len, len(line), 7) |
nwjs/chromium.src | tools/flags/list_flags.py | Python | bsd-3-clause | 4,638 | 0.009702 | #!/usr/bin/env python3
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Emits a formatted, optionally filtered view of the list of flags.
"""
from __future__ import print_function
import argparse
import os
import re
import sys
# Chromium checkout root, plus the vendored copies of pyjson5 and depot_tools
# that are put on sys.path so the `json5` and `owners` imports below resolve.
ROOT_PATH = os.path.join(os.path.dirname(__file__), '..', '..')
PYJSON5_PATH = os.path.join(ROOT_PATH, 'third_party', 'pyjson5', 'src')
DEPOT_TOOLS_PATH = os.path.join(ROOT_PATH, 'third_party', 'depot_tools')
sys.path.append(PYJSON5_PATH)
sys.path.append(DEPOT_TOOLS_PATH)
import json5
import owners
def load_metadata():
  """Parses and returns chrome/browser/flag-metadata.json (JSON5 format)."""
  flags_path = os.path.join(ROOT_PATH, 'chrome', 'browser',
                            'flag-metadata.json')
  # Use a context manager so the file handle is closed deterministically
  # instead of being leaked to the garbage collector.
  with open(flags_path) as flags_file:
    return json5.load(flags_file)
def keep_expired_by(flags, mstone):
  """Filter flags to contain only flags that expire by mstone.

  Only flags with an expiration milestone <= mstone are in the returned
  list; flags that never expire (expiry_milestone == -1) are excluded.
  (The previous docstring claimed never-expiring flags were kept, which
  contradicted both the code and the doctest below.)

  >>> keep_expired_by([{'expiry_milestone': 3}], 2)
  []
  >>> keep_expired_by([{'expiry_milestone': 3}], 3)
  [{'expiry_milestone': 3}]
  >>> keep_expired_by([{'expiry_milestone': -1}], 3)
  []
  """
  # Chained comparison: milestone != -1 AND milestone <= mstone.
  return [f for f in flags if -1 != f['expiry_milestone'] <= mstone]
def keep_never_expires(flags):
  """Filter flags to contain only flags that never expire.

  >>> keep_never_expires([{'expiry_milestone': -1}, {'expiry_milestone': 2}])
  [{'expiry_milestone': -1}]
  """
  never_expiring = [flag for flag in flags if flag['expiry_milestone'] == -1]
  return never_expiring
def resolve_owners(flags):
  """Resolves sets of owners for every flag in the provided list.

  For each entry in a flag's owners list:
  * OWNERS-file references ('//path' or anything containing '/') are
    expanded to the transitive set of owners listed in those files
  * Bare usernames become @chromium.org email addresses
  * Any other kind of entry is passed through unmodified
  """
  owners_db = owners.Database(ROOT_PATH, open, os.path)
  resolved_flags = []
  for flag in flags:
    flag_copy = flag.copy()
    resolved = []
    for entry in flag['owners']:
      if entry.startswith('//') or '/' in entry:
        # An OWNERS-file reference: strip the '//' prefix and expand it.
        resolved += owners_db.owners_rooted_at_file(re.sub('//', '', entry))
      elif '@' not in entry:
        resolved.append(entry + '@chromium.org')
      else:
        resolved.append(entry)
    flag_copy['resolved_owners'] = resolved
    resolved_flags.append(flag_copy)
  return resolved_flags
def find_unused(flags):
  """Returns the flags whose names appear in no flag-definition file."""
  FLAG_FILES = [
      'chrome/browser/about_flags.cc',
      'ios/chrome/browser/flags/about_flags.mm',
  ]
  # Read each file once up front, closing the handles deterministically
  # (the original open(...).read() leaked the file objects).
  flag_files_data = []
  for flag_file in FLAG_FILES:
    with open(flag_file, 'r', encoding='utf-8') as f:
      flag_files_data.append(f.read())
  unused_flags = []
  for flag in flags:
    # Search for the name in quotes.
    needle = '"%s"' % flag['name']
    if not any(needle in data for data in flag_files_data):
      unused_flags.append(flag)
  return unused_flags
def print_flags(flags, verbose):
  """Prints the supplied list of flags.

  In verbose mode, prints name, expiry, and owner list; in non-verbose mode,
  prints just the name.

  >>> f1 = {'name': 'foo', 'expiry_milestone': 73, 'owners': ['bar', 'baz']}
  >>> f1['resolved_owners'] = ['bar@c.org', 'baz@c.org']
  >>> f2 = {'name': 'bar', 'expiry_milestone': 74, 'owners': ['//quxx/OWNERS']}
  >>> f2['resolved_owners'] = ['quxx@c.org']
  >>> print_flags([f1], False)
  foo
  >>> print_flags([f1], True)
  foo,73,bar baz,bar@c.org baz@c.org
  >>> print_flags([f2], False)
  bar
  >>> print_flags([f2], True)
  bar,74,//quxx/OWNERS,quxx@c.org
  """
  for flag in flags:
    if not verbose:
      print(flag['name'])
      continue
    owner_list = ' '.join(flag['owners'])
    resolved_list = ' '.join(flag['resolved_owners'])
    print('%s,%d,%s,%s' % (flag['name'], flag['expiry_milestone'],
                           owner_list, resolved_list))
def main():
  # Run the module doctests on every invocation -- cheap, and catches drift
  # between the docstrings and the implementations.
  import doctest
  doctest.testmod()
  parser = argparse.ArgumentParser(description=__doc__)
  # The three filter modes are mutually exclusive.
  group = parser.add_mutually_exclusive_group()
  group.add_argument('-n', '--never-expires', action='store_true')
  group.add_argument('-e', '--expired-by', type=int)
  group.add_argument('-u', '--find-unused', action='store_true')
  parser.add_argument('-v', '--verbose', action='store_true')
  parser.add_argument('--testonly', action='store_true')
  args = parser.parse_args()
  # --testonly: stop after the doctests (used by the test harness).
  if args.testonly:
    return
  flags = load_metadata()
  if args.expired_by:
    flags = keep_expired_by(flags, args.expired_by)
  if args.never_expires:
    flags = keep_never_expires(flags)
  if args.find_unused:
    flags = find_unused(flags)
  # Owner resolution reads OWNERS files on disk, so do it after filtering.
  flags = resolve_owners(flags)
  print_flags(flags, args.verbose)
# Entry point when run as a standalone tool.
if __name__ == '__main__':
  main()
|
DistributedML/TorML | eurosys-eval/results_tor_no_tor/makeplot.py | Python | mit | 1,404 | 0.002849 | import matplotlib.pyplot as plt
import numpy as np
import pdb
if __name__ == "__main__":
    fig, ax = plt.subplots(figsize=(10, 5))
    # Report the spread of run lengths (row count of each loss CSV) per
    # client count.
    # Reconstructed: stray " | " extraction artifacts garbled the two print
    # statements below; single-argument print() is Python 2/3 neutral.
    for clients in (10, 50, 100, 200):
        median_data = np.zeros(5)
        for k in (1, 2, 3, 4, 5):
            data = np.loadtxt("loss_" + str(clients) + "_" + str(k) + ".csv", delimiter=',')
            # Number of rows == number of recorded iterations for this run.
            median_data[k - 1] = data.shape[0]
        print(str(clients) + " median is " + str(np.median(median_data)))
        print(str(clients) + " stddev is " + str(np.std(median_data)))
    # Plot one representative run (k=2) per client count.
    data1 = np.loadtxt("loss_10_2.csv", delimiter=',')
    data2 = np.loadtxt("loss_50_2.csv", delimiter=',')
    data3 = np.loadtxt("loss_100_2.csv", delimiter=',')
    data4 = np.loadtxt("loss_200_2.csv", delimiter=',')
    plt.plot(data1, color="black", label="10 clients", lw=5)
    plt.plot(data2, color="red", label="50 clients", lw=5)
    plt.plot(data3, color="orange", label="100 clients", lw=5)
    plt.plot(data4, color="green", label="200 clients", lw=5)
    plt.legend(loc='best', ncol=1, fontsize=18)
    plt.xlabel("Time (s)", fontsize=22)
    plt.ylabel("Training Error", fontsize=22)
    axes = plt.gca()
    axes.set_ylim([0, 0.5])
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    plt.setp(ax.get_xticklabels(), fontsize=18)
    plt.setp(ax.get_yticklabels(), fontsize=18)
    plt.tight_layout()
    plt.show()
gtaylor/evarify | setup.py | Python | mit | 1,220 | 0 | import re
from setuptools import setup, find_packages
with open('evarify/__init__.py', 'r') as fd:
    # Pull __version__ out of the package source without importing it.
    # Reconstructed: a stray " | " extraction artifact garbled the
    # '__version__' token in the original regex.
    _version_match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                               fd.read(), re.MULTILINE)

# Guard against a missing __version__ line: re.search() returns None, and the
# original code called .group(1) on it, raising AttributeError instead of the
# intended RuntimeError below.
version = _version_match.group(1) if _version_match else None

if not version:
    raise RuntimeError('Cannot find version information')
# Read the long description up front so the file handle is closed promptly
# (the original inline open('README.rst').read() leaked the handle).
with open('README.rst') as readme:
    long_description = readme.read()

# Reconstructed: a stray " | " extraction artifact preceded the keywords=
# argument in the original text.
setup(
    name="evarify",
    version=version,
    author="Greg Taylor",
    author_email="gtaylor@gc-taylor.com",
    description="Environment variable validation and coercion.",
    long_description=long_description,
    license="MIT License",
    keywords="environment variable",
    url='https://github.com/gtaylor/evarify',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    packages=find_packages(exclude=['tests']),
    package_data={'': ['LICENSE', '*.txt', '*.rst']},
    tests_require=['nose'],
    test_suite='nose.collector',
)
|
nicolashainaux/mathmaker | tests/integration/mental_calculation/04_yellow1/test_04_yellow1_W02b.py | Python | gpl-3.0 | 1,241 | 0 | # -*- coding: utf-8 -*-
# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2018 Nicolas Hainaux <nh.techn@gmail.com>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along | with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from mathmaker.lib import shared
from mathmaker.lib.document.frames import Sheet
def test_W02b():
    """Check this sheet is generated without any error."""
    # Smoke test: rendering sheet W02b all the way to PDF exercises the
    # whole sheet-generation pipeline.
    shared.machine.write_out(str(Sheet('mental_calculation',
                                       '04_yellow1',
                                       'W02b')),
                             pdf_output=True)
|
beni55/flocker | flocker/common/__init__.py | Python | apache-2.0 | 193 | 0 | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Shared flocker components.
"""
__al | l__ = ['INode', 'FakeNode', 'ProcessNode']
from ._ipc import INode, FakeNode, ProcessNod | e
|
pdehaye/theming-edx-platform | common/lib/xmodule/xmodule/capa_module.py | Python | agpl-3.0 | 44,840 | 0.001784 | import cgi
import datetime
import hashlib
import json
import logging
import os
import traceback
import struct
import sys
from pkg_resources import resource_string
from capa.capa_problem import LoncapaProblem
from capa.responsetypes import StudentInputError, \
ResponseError, LoncapaProblemError
from capa.util import convert_files_to_filenames
from .progress import Progress
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xmodule.exceptions import NotFoundError, ProcessingError
from xblock.core import Scope, String, Boolean, Dict, Integer, Float
from .fields import Timedelta, Date
from django.utils.timezone import UTC
log = logging.getLogger("mitx.courseware")
# Generate this many different variants of problems with rerandomize=per_student
NUM_RANDOMIZATION_BINS = 20
# Never produce more than this many different seeds, no matter what.
# NOTE(review): MAX_RANDOMIZATION_BINS is not referenced anywhere in this
# chunk -- confirm it is used elsewhere before removing.
MAX_RANDOMIZATION_BINS = 1000
def randomization_bin(seed, problem_id):
    """
    Pick a randomization bin for the problem given the user's seed and a problem id.

    We do this because we only want e.g. 20 randomizations of a problem to make analytics
    interesting. To avoid having sets of students that always get the same problems,
    we'll combine the system's per-student seed with the problem id in picking the bin.
    """
    digest = hashlib.sha1()
    for piece in (seed, problem_id):
        digest.update(str(piece))
    # Fold the first 7 hex digits of the hash into one of the bins.
    return int(digest.hexdigest()[:7], 16) % NUM_RANDOMIZATION_BINS
class Randomization(String):
    """
    Define a field to store how to randomize a problem.
    """
    # Legacy serialized values map to canonical names; anything else is
    # passed through unchanged.
    def from_json(self, value):
        return {"": "always",
                "true": "always",
                "false": "per_student"}.get(value, value)

    to_json = from_json
class ComplexEncoder(json.JSONEncoder):
    """
    Extend the JSON encoder to correctly handle complex numbers
    """
    def default(self, obj):
        """
        Print a nicely formatted complex number, or default to the JSON encoder
        """
        if not isinstance(obj, complex):
            return json.JSONEncoder.default(self, obj)
        return u"{0.real:.7g}{0.imag:+.7g}*j".format(obj)
class CapaFields(object):
    """
    Define the possible fields for a Capa problem
    """
    display_name = String(
        display_name="Display Name",
        help="This name appears in the horizontal navigation at the top of the page.",
        scope=Scope.settings,
        # it'd be nice to have a useful default but it screws up other things; so,
        # use display_name_with_default for those
        default="Blank Advanced Problem"
    )
    attempts = Integer(help="Number of attempts taken by the student on this problem",
                       default=0, scope=Scope.user_state)
    max_attempts = Integer(
        display_name="Maximum Attempts",
        help=("Defines the number of times a student can try to answer this problem. "
              "If the value is not set, infinite attempts are allowed."),
        values={"min": 0}, scope=Scope.settings
    )
    # Reconstructed: stray " | " extraction artifacts garbled `Scope.settings`
    # here and `String(` in showanswer below.
    due = Date(help="Date that this problem is due by", scope=Scope.settings)
    graceperiod = Timedelta(
        help="Amount of time after the due date that submissions will be accepted",
        scope=Scope.settings
    )
    showanswer = String(
        display_name="Show Answer",
        help=("Defines when to show the answer to the problem. "
              "A default value can be set in Advanced Settings."),
        scope=Scope.settings,
        default="finished",
        values=[
            {"display_name": "Always", "value": "always"},
            {"display_name": "Answered", "value": "answered"},
            {"display_name": "Attempted", "value": "attempted"},
            {"display_name": "Closed", "value": "closed"},
            {"display_name": "Finished", "value": "finished"},
            {"display_name": "Past Due", "value": "past_due"},
            {"display_name": "Never", "value": "never"}]
    )
    force_save_button = Boolean(
        help="Whether to force the save button to appear on the page",
        scope=Scope.settings,
        default=False
    )
    rerandomize = Randomization(
        display_name="Randomization",
        help="Defines how often inputs are randomized when a student loads the problem. "
             "This setting only applies to problems that can have randomly generated numeric values. "
             "A default value can be set in Advanced Settings.",
        default="never",
        scope=Scope.settings,
        values=[
            {"display_name": "Always", "value": "always"},
            {"display_name": "On Reset", "value": "onreset"},
            {"display_name": "Never", "value": "never"},
            {"display_name": "Per Student", "value": "per_student"}
        ]
    )
    # Problem XML source and the per-student answer/correctness state.
    data = String(help="XML data for the problem", scope=Scope.content, default="<problem></problem>")
    correct_map = Dict(help="Dictionary with the correctness of current student answers",
                       scope=Scope.user_state, default={})
    input_state = Dict(help="Dictionary for maintaining the state of inputtypes", scope=Scope.user_state)
    student_answers = Dict(help="Dictionary with the current student responses", scope=Scope.user_state)
    done = Boolean(help="Whether the student has answered the problem", scope=Scope.user_state)
    seed = Integer(help="Random seed for this student", scope=Scope.user_state)
    weight = Float(
        display_name="Problem Weight",
        help=("Defines the number of points each problem is worth. "
              "If the value is not set, each response field in the problem is worth one point."),
        values={"min": 0, "step": .1},
        scope=Scope.settings
    )
    markdown = String(help="Markdown source of this module", default=None, scope=Scope.settings)
    source_code = String(
        help="Source code for LaTeX and Word problems. This feature is not well-supported.",
        scope=Scope.settings
    )
class CapaModule(CapaFields, XModule):
"""
An XModule implementing LonCapa format problems, implemented by way of
capa.capa_problem.LoncapaProblem
CapaModule.__init__ takes the same arguments as xmodule.x_module:XModule.__init__
"""
icon_class = 'problem'
js = {'coffee': [resource_string(__name__, 'js/src/capa/display.coffee'),
resource_string(__name__, 'js/src/collapsible.coffee'),
resource_string(__name__, 'js/src/javascript_loader.coffee'),
],
'js': [resource_string(__name__, 'js/src/capa/imageinput.js'),
resource_string(__name__, 'js/src/capa/schematic.js')
]}
js_module_name = "Problem"
css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]}
def __init__(self, *args, **kwargs):
"""
Accepts the same arguments as xmodule.x_module:XModule.__init__
"""
XModule.__init__(self, *args, **kwargs)
due_date = self.due
if self.graceperiod is not None and due_date:
self.close_date = due_date + self.graceperiod
else:
self.close_date = due_date
if self.seed is None:
self.choose_new_seed()
# Need the problem location in openendedresponse to send out. Adding
# it to the system here seems like the least clunky way to get it
# there.
self.system.set('location', self.location.url())
try:
# TODO (vshnayder): move as much as possible of this work and error
# checking to descriptor load time
self.lcp = self.new_lcp(self.get_state_for_lcp())
# At this point, we need to persist the randomization seed
# so that when the problem is re-loaded (to check/view/save)
# it stays the same.
# However, we do not want to write to the database
# every time the module is loaded.
# So we set the seed ONLY when there is not one set already
if self.seed |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/object_detection/trainer_test.py | Python | bsd-2-clause | 6,635 | 0.002411 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or | agreed to in writing, software
# distributed under the License is distributed on an "AS IS | " BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.trainer."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection import trainer
from object_detection.core import losses
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.protos import train_pb2
# Number of object classes predicted by the fake model and used when
# generating random groundtruth class labels.
NUMBER_OF_CLASSES = 2
def get_input_function():
  """A function to get test inputs. Returns an image with one box."""
  # One random 32x32 RGB image plus a single random class label and a
  # single box roughly centered in the image (coords in [0.4, 0.6]).
  random_image = tf.random_uniform([32, 32, 3], dtype=tf.float32)
  random_class = tf.random_uniform(
      [1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32)
  random_box = tf.random_uniform(
      [1, 4], minval=0.4, maxval=0.6, dtype=tf.float32)
  tensor_dict = {
      fields.InputDataFields.image: random_image,
      fields.InputDataFields.groundtruth_classes: random_class,
      fields.InputDataFields.groundtruth_boxes: random_box,
  }
  return tensor_dict
class FakeDetectionModel(model.DetectionModel):
  """A simple (and poor) DetectionModel for use in test."""

  def __init__(self):
    # Fixed number of classes; anchorwise losses keep per-anchor values so
    # loss() can reduce them itself.
    super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES)
    self._classification_loss = losses.WeightedSigmoidClassificationLoss(
        anchorwise_output=True)
    self._localization_loss = losses.WeightedSmoothL1LocalizationLoss(
        anchorwise_output=True)

  def preprocess(self, inputs):
    """Input preprocessing, resizes images to 28x28.

    Args:
      inputs: a [batch, height_in, width_in, channels] float32 tensor
        representing a batch of images with values between 0 and 255.0.

    Returns:
      preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
    """
    return tf.image.resize_images(inputs, [28, 28])

  def predict(self, preprocessed_inputs):
    """Prediction tensors from inputs tensor.

    Args:
      preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.

    Returns:
      prediction_dict: a dictionary holding prediction tensors to be
        passed to the Loss or Postprocess functions.
    """
    # A single fully-connected head over the flattened pixels predicts both
    # class scores and one box encoding per image (one "anchor").
    flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs)
    class_prediction = tf.contrib.layers.fully_connected(
        flattened_inputs, self._num_classes)
    box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4)
    return {
        'class_predictions_with_background': tf.reshape(
            class_prediction, [-1, 1, self._num_classes]),
        'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])
    }

  def postprocess(self, prediction_dict, **params):
    """Convert predicted output tensors to final detections. Unused.

    Args:
      prediction_dict: a dictionary holding prediction tensors.
      **params: Additional keyword arguments for specific implementations of
        DetectionModel.

    Returns:
      detections: a dictionary with empty fields.
    """
    return {
        'detection_boxes': None,
        'detection_scores': None,
        'detection_classes': None,
        'num_detections': None
    }

  def loss(self, prediction_dict):
    """Compute scalar loss tensors with respect to provided groundtruth.

    Calling this function requires that groundtruth tensors have been
    provided via the provide_groundtruth function.

    Args:
      prediction_dict: a dictionary holding predicted tensors

    Returns:
      a dictionary mapping strings (loss names) to scalar tensors representing
        loss values.
    """
    # Stack the per-image groundtruth lists into batch tensors and weight
    # every (single) anchor equally.
    batch_reg_targets = tf.stack(
        self.groundtruth_lists(fields.BoxListFields.boxes))
    batch_cls_targets = tf.stack(
        self.groundtruth_lists(fields.BoxListFields.classes))
    weights = tf.constant(
        1.0, dtype=tf.float32,
        shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1])
    location_losses = self._localization_loss(
        prediction_dict['box_encodings'], batch_reg_targets,
        weights=weights)
    cls_losses = self._classification_loss(
        prediction_dict['class_predictions_with_background'], batch_cls_targets,
        weights=weights)
    loss_dict = {
        'localization_loss': tf.reduce_sum(location_losses),
        'classification_loss': tf.reduce_sum(cls_losses),
    }
    return loss_dict

  def restore_fn(self, checkpoint_path, from_detection_checkpoint=True):
    """Return callable for loading a checkpoint into the tensorflow graph.

    Args:
      checkpoint_path: path to checkpoint to restore.
      from_detection_checkpoint: whether to restore from a full detection
        checkpoint (with compatible variable names) or to restore from a
        classification checkpoint for initialization prior to training.

    Returns:
      a callable which takes a tf.Session and does nothing.
    """
    def restore(unused_sess):
      return
    return restore
class TrainerTest(tf.test.TestCase):
  """Smoke test: trainer.train should run end to end with the fake model."""

  def test_configure_trainer_and_train_two_steps(self):
    # Adam with a constant learning rate, two data-augmentation options,
    # and num_steps: 2 so the test finishes quickly.
    train_config_text_proto = """
    optimizer {
      adam_optimizer {
        learning_rate {
          constant_learning_rate {
            learning_rate: 0.01
          }
        }
      }
    }
    data_augmentation_options {
      random_adjust_brightness {
        max_delta: 0.2
      }
    }
    data_augmentation_options {
      random_adjust_contrast {
        min_delta: 0.7
        max_delta: 1.1
      }
    }
    num_steps: 2
    """
    train_config = train_pb2.TrainConfig()
    text_format.Merge(train_config_text_proto, train_config)
    train_dir = self.get_temp_dir()
    # Single-machine, single-clone CPU run; checkpoints go to the temp dir.
    trainer.train(create_tensor_dict_fn=get_input_function,
                  create_model_fn=FakeDetectionModel,
                  train_config=train_config,
                  master='',
                  task=0,
                  num_clones=1,
                  worker_replicas=1,
                  clone_on_cpu=True,
                  ps_tasks=0,
                  worker_job_name='worker',
                  is_chief=True,
                  train_dir=train_dir)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  tf.test.main()
|
chapman-phys227-2016s/cw-5-classwork-team | calculus.py | Python | mit | 2,271 | 0.005724 | #!/usr/bin/python
"""
Cw5 the pinacle of grupwork and efficienceyc and comroodery
Calculus module implementing
discrete function function
differentiation function
and trapezoidal integration function
"""
import numpy as np
import math
def diff(f, a, b, n):
    """Forward-difference derivative estimates of f on [a, b].

    Samples f at the n+1 evenly spaced points of [a, b] and returns the
    pair (y, z): the sampled values and the derivative estimates.  Interior
    entries of z are forward differences; the last entry falls back to a
    backward difference because there is no sample beyond b.

    The original looped with Python 2's xrange; this version evaluates f
    once per point and forms the differences with vectorized numpy ops, so
    it also runs under Python 3.
    """
    x = np.linspace(a, b, n + 1)
    h = (b - a) / float(n)
    # f may not accept arrays, so evaluate point-wise then vectorize the rest.
    y = np.array([f(xi) for xi in x], dtype=float)
    z = np.empty_like(y)
    z[:-1] = (y[1:] - y[:-1]) / h   # forward differences
    z[-1] = (y[-1] - y[-2]) / h     # backward difference at the right edge
    return y, z
def discrete_func(f, a, b, n):
    """Sample f at the n+1 evenly spaced points of [a, b].

    Returns the pair (x, y): the sample grid and f evaluated on it.
    """
    grid = np.linspace(a, b, n + 1)
    samples = np.vectorize(f)(grid)
    return grid, samples
def diff2(f, a, b, n):
    """Matrix-based derivative of f on [a, b] at n sample points.

    Builds an n x n difference matrix whose interior rows are central
    differences and whose first/last rows are a forward and a backward
    difference, then returns matrix @ y.

    Fixes versus the original: a stray ``|`` character that broke the
    syntax was removed; the interior rows computed the *negative* of the
    central difference (inconsistent with the boundary rows), and the
    spacing used (b-a)/n although the n sample points are (b-a)/(n-1)
    apart.
    """
    x, y = discrete_func(f, a, b, n - 1)
    # n sample points on [a, b] are spaced (b - a)/(n - 1) apart.
    h = (b - a) / float(n - 1)
    matrix = np.zeros((n, n))
    for i in range(1, n - 1):
        # Central difference: z[i] = (y[i+1] - y[i-1]) / (2h).
        matrix[i][i - 1] = -1 / (2 * h)
        matrix[i][i + 1] = 1 / (2 * h)
    # Forward difference at the left edge, backward at the right edge.
    matrix[0][0] = -1 / h
    matrix[0][1] = 1 / h
    matrix[-1][-1] = 1 / h
    matrix[-1][-2] = -1 / h
    return np.dot(matrix, y)
def test_diff():
    """The last forward-difference derivative of sin should approach cos(1)."""
    _, derivative = diff(math.sin, 0, 1, 100000)
    error = math.fabs(derivative[-1] - math.cos(1))
    assert error < 1e-3, 'That aint how the sine function do.'
def test_diff2():
    """The matrix derivative of sin at 1 should approximate cos(1).

    The original body was a copy of test_diff and never exercised diff2;
    it also used n=100000, which would allocate an n x n matrix.  A
    moderate n keeps the matrix small while staying within tolerance.
    """
    apt = math.fabs(diff2(math.sin, 0, 1, 1000)[-1] - math.cos(1)) < 1e-3
    msg = 'That aint how the sine function do.'
    assert apt, msg
def trapezoidal_matrix(f, a, b, n):
    """Trapezoidal integration of f over [a, b] via a dot product.

    Samples f (which must accept numpy arrays) at n evenly spaced points
    and dots the values with the trapezoid weights (h/2, h, ..., h, h/2).

    Fixes versus the original: a stray ``|`` corrupted the function name,
    and the spacing used (b-a)/n although linspace(a, b, n) produces
    points (b-a)/(n-1) apart, which underweighted the whole integral by
    (n-1)/n.
    """
    indexer = np.linspace(a, b, n)
    # n sample points span [a, b] with spacing (b - a)/(n - 1).
    h = (b - a) / float(n - 1)
    values = f(indexer)
    weights = np.full(n, h)
    weights[0] = h / 2.0
    weights[-1] = h / 2.0
    return np.dot(values, weights)
def test_trap_matrix():
    """Integrating sin over [0, pi/2] with the matrix trapezoid rule gives ~1."""
    result = trapezoidal_matrix(np.sin, 0, np.pi / 2.0, 10000)
    assert np.abs(result - 1) < 1e-3, 'That aint how the sine do.'
|
vrde/pandora | pandora/views.py | Python | mit | 458 | 0.002183 | from django.shortcuts import render
from django.shortcuts import render
from django.middleware.csrf import get_token

from ajaxuploader.views import AjaxFileUploader

from pandora.backends import SignalBasedLocalUploadBackend
from pandora.models import Item


def home(request):
    """Render the landing page with every stored item and a CSRF token."""
    return render(request, 'pandora/home.html', {
        'items': Item.objects.all(),
        'csrf_token': get_token(request)
    })


# Module-level view callable wired into the URLconf for AJAX uploads.
import_uploader = AjaxFileUploader(SignalBasedLocalUploadBackend)
|
saxix/django-uuid-pk | django_uuid_pk/tests/settings.py | Python | bsd-3-clause | 1,626 | 0.00246 | import os
import os

# Minimal Django settings used by the django_uuid_pk test suite.
SITE_ID = 1
STATIC_URL = '/static/'
SECRET_KEY = ';pkj;lkj;lkjh;lkj;oi'

# Select the database backend through the DBENGINE environment variable:
# 'pg' -> PostgreSQL, 'mysql' -> MySQL, anything else/unset -> SQLite.
db = os.environ.get('DBENGINE', None)
if db == 'pg':
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'django_uuid_pk',
            'HOST': '127.0.0.1',
            'PORT': '',
            'USER': 'postgres',
            'PASSWORD': '',
            'OPTIONS': {
                # same value for all versions of django (is the default in 1.6)
                'autocommit': True,
            },
        },
    }
elif db == 'mysql':
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': 'django_uuid_pk',
            'HOST': '127.0.0.1',
            'PORT': '',
            'USER': 'root',
            'PASSWORD': '',
        },
    }
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': 'django_uuid_pk.sqlite',
            'HOST': '',
            'PORT': '',
        },
    }

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django_uuid_pk.tests',
)

ALLOWED_HOSTS = ('127.0.0.1',)

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {
            'format': '%(levelname)-8s: %(asctime)s %(name)10s: %(funcName)40s %(message)s'
        }
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
}
|
modulexcite/catapult | dashboard/dashboard/group_report_test.py | Python | bsd-3-clause | 6,327 | 0.003161 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import webapp2
import webtest
from google.appengine.ext import ndb
from dashboard import group_report
from dashboard import test_owner
from dashboard import testing_common
from dashboard import utils
from dashboard.models import anomaly
from dashboard.models import bug_data
from dashboard.models import sheriff
from dashboard.models import stoppage_alert
class GroupReportTest(testing_common.TestCase):
  """Tests for the /group_report page handler.

  The only change versus the original is the removal of two stray ``|``
  characters that corrupted ``setUp`` and ``end_revision`` and broke the
  module's syntax.
  """

  def setUp(self):
    super(GroupReportTest, self).setUp()
    app = webapp2.WSGIApplication(
        [('/group_report', group_report.GroupReportHandler)])
    self.testapp = webtest.TestApp(app)

  def _AddAnomalyEntities(
      self, revision_ranges, test_key, sheriff_key, bug_id=None):
    """Adds a group of Anomaly entities to the datastore."""
    urlsafe_keys = []
    for start_rev, end_rev in revision_ranges:
      anomaly_key = anomaly.Anomaly(
          start_revision=start_rev, end_revision=end_rev,
          test=test_key, bug_id=bug_id, sheriff=sheriff_key,
          median_before_anomaly=100, median_after_anomaly=200).put()
      urlsafe_keys.append(anomaly_key.urlsafe())
    return urlsafe_keys

  def _AddTests(self):
    """Adds sample Test entities and returns their keys."""
    testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
        'scrolling-benchmark': {
            'first_paint': {},
            'mean_frame_time': {},
        }
    })
    keys = [
        utils.TestKey(
            'ChromiumGPU/linux-release/scrolling-benchmark/first_paint'),
        utils.TestKey(
            'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time'),
    ]
    # By default, all Test entities have an improvement_direction of UNKNOWN,
    # meaning that neither direction is considered an improvement.
    # Here we set the improvement direction so that some anomalies are
    # considered improvements.
    for test_key in keys:
      test = test_key.get()
      test.improvement_direction = anomaly.DOWN
      test.put()
    return keys

  def _AddSheriff(self):
    """Adds a Sheriff entity and returns the key."""
    return sheriff.Sheriff(
        id='Chromium Perf Sheriff', email='sullivan@google.com').put()

  def testGet_WithAnomalyKeys_ShowsSelectedAndOverlapping(self):
    sheriff_key = self._AddSheriff()
    test_keys = self._AddTests()
    selected_ranges = [(400, 900), (200, 700)]
    overlapping_ranges = [(300, 500), (500, 600), (600, 800)]
    non_overlapping_ranges = [(100, 200)]
    selected_keys = self._AddAnomalyEntities(
        selected_ranges, test_keys[0], sheriff_key)
    self._AddAnomalyEntities(
        overlapping_ranges, test_keys[0], sheriff_key)
    self._AddAnomalyEntities(
        non_overlapping_ranges, test_keys[0], sheriff_key)
    response = self.testapp.get(
        '/group_report?keys=%s' % ','.join(selected_keys))
    alert_list = self.GetEmbeddedVariable(response, 'ALERT_LIST')
    # Expect selected alerts + overlapping alerts,
    # but not the non-overlapping alert.
    self.assertEqual(5, len(alert_list))

  def testGet_WithKeyOfNonExistentAlert_ShowsError(self):
    key = ndb.Key('Anomaly', 123)
    response = self.testapp.get('/group_report?keys=%s' % key.urlsafe())
    self.assertIn('error', response.body)
    self.assertIn('No Anomaly found for key', response.body)

  def testGet_WithInvalidKeyParameter_ShowsError(self):
    response = self.testapp.get('/group_report?keys=foobar')
    self.assertIn('error', response.body)
    self.assertIn('Invalid Anomaly key', response.body)

  def testGet_WithRevParameter(self):
    # If the rev parameter is given, then all alerts whose revision range
    # includes the given revision should be included.
    sheriff_key = self._AddSheriff()
    test_keys = self._AddTests()
    self._AddAnomalyEntities(
        [(190, 210), (200, 300), (100, 200), (400, 500)],
        test_keys[0], sheriff_key)
    response = self.testapp.get('/group_report?rev=200')
    alert_list = self.GetEmbeddedVariable(response, 'ALERT_LIST')
    self.assertEqual(3, len(alert_list))

  def testGet_WithInvalidRevParameter_ShowsError(self):
    response = self.testapp.get('/group_report?rev=foo')
    self.assertIn('error', response.body)
    self.assertIn('Invalid rev', response.body)

  def testGet_WithBugIdParameter(self):
    sheriff_key = self._AddSheriff()
    test_keys = self._AddTests()
    bug_data.Bug(id=123).put()
    self._AddAnomalyEntities(
        [(200, 300), (100, 200), (400, 500)],
        test_keys[0], sheriff_key, bug_id=123)
    self._AddAnomalyEntities(
        [(150, 250)], test_keys[0], sheriff_key)
    response = self.testapp.get('/group_report?bug_id=123')
    alert_list = self.GetEmbeddedVariable(response, 'ALERT_LIST')
    self.assertEqual(3, len(alert_list))

  def testGet_WithBugIdParameter_ListsStoppageAlerts(self):
    test_keys = self._AddTests()
    bug_data.Bug(id=123).put()
    row = testing_common.AddRows(utils.TestPath(test_keys[0]), {100})[0]
    alert = stoppage_alert.CreateStoppageAlert(test_keys[0].get(), row)
    alert.bug_id = 123
    alert.put()
    response = self.testapp.get('/group_report?bug_id=123')
    alert_list = self.GetEmbeddedVariable(response, 'ALERT_LIST')
    self.assertEqual(1, len(alert_list))

  def testGet_WithBugIdForBugThatHasOwner_ShowsOwnerInfo(self):
    sheriff_key = self._AddSheriff()
    test_keys = self._AddTests()
    bug_data.Bug(id=123).put()
    test_key = test_keys[0]
    test_path_parts = utils.TestPath(test_key).split('/')
    test_suite_path = '%s/%s' % (test_path_parts[0], test_path_parts[2])
    test_owner.AddOwnerFromDict({test_suite_path: ['foo@bar.com']})
    self._AddAnomalyEntities([(150, 250)], test_key, sheriff_key, bug_id=123)
    response = self.testapp.get('/group_report?bug_id=123')
    owner_info = self.GetEmbeddedVariable(response, 'OWNER_INFO')
    self.assertEqual('foo@bar.com', owner_info[0]['email'])

  def testGet_WithInvalidBugIdParameter_ShowsError(self):
    response = self.testapp.get('/group_report?bug_id=foo')
    self.assertNotIn('ALERT_LIST', response.body)
    self.assertIn('Invalid bug ID', response.body)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
|
rendermotion/RMPY | AutoRig/RMGenericRigStructure.py | Python | lgpl-3.0 | 2,106 | 0.004748 | import maya.cmds as cmds
import maya.cmds as cmds

# MetacubeFileNameConvention is a studio-specific, optional dependency;
# genericRigStructure falls back to default names when it is missing.
# (Narrowed the original bare `except:` so real errors still surface.)
try:
    from MetacubeScripts import MetacubeFileNameConvention
except ImportError:
    pass
class genericRigStructure(object):
    """Builds the standard top-level group hierarchy for a character rig.

    Creates (or reuses) a main group plus the mesh/rig/controls groups and
    their sub-groups.  Fixes versus the original: stray ``|`` characters
    corrupted the "subGroup" key and the objExists call, and the bare
    `except:` in __init__ was narrowed.
    """

    def __init__(self):
        # Top-level groups and their optional child groups.
        self.groups = {
            "mesh": {"group": "mesh_grp",
                     "subGroup": ["body_grp", "cloth_grp", "accesories_grp",
                                  "hair_grp", "trackers_grp", "collision_grp",
                                  "pxycloth_grp", "pxyhair_grp",
                                  "dynspline_grp"]},
            "rig": {"group": "rig_grp", "subGroup": None},
            "controls": {"group": "control_grp", "subGroup": None}
        }
        try:
            self.FileNameConv = \
                MetacubeFileNameConvention.MetacubeFileNameConvention()
        except Exception:
            # Naming-convention module unavailable; fall back to defaults.
            self.FileNameConv = None
        self.CreateStructure()

    def CreateStructure(self):
        """Create the main group and parent all configured groups under it."""
        if self.FileNameConv is not None and self.FileNameConv.nameInFormat:
            main_group_name = (self.FileNameConv.AssetType + "_" +
                               self.FileNameConv.AssetName + "_rig")
        else:
            main_group_name = "MainCharacter"
        if not cmds.objExists(main_group_name):
            cmds.group(empty=True, name=main_group_name)
        for generic_group in self.groups:
            group_name = self.groups[generic_group]["group"]
            if cmds.objExists(group_name):
                # Reparent only when the group is not already under the main
                # group.
                if not cmds.listRelatives(
                        group_name, parent=True)[0] == main_group_name:
                    cmds.parent(group_name, main_group_name)
            else:
                cmds.group(empty=True, name=group_name)
                cmds.parent(group_name, main_group_name)
            sub_groups = self.groups[generic_group]["subGroup"]
            if sub_groups:
                for sub_name in sub_groups:
                    if not cmds.objExists(sub_name):
                        cmds.group(empty=True, name=sub_name)
                        cmds.parent(sub_name, group_name)
|
albertaparicio/tfg-voice-conversion | attention_graphs.py | Python | gpl-3.0 | 905 | 0.022099 | import gzip
import gzip
import os
import pickle

import matplotlib

# The backend must be selected BEFORE matplotlib.pyplot is imported for it
# to reliably take effect, and the canonical spelling is 'TkAgg' (the
# original 'TKagg' after the pyplot import did nothing useful).
matplotlib.use('TkAgg')

import matplotlib.pyplot as plt
import numpy as np
def show_attention():
    """Load saved attention weights and display their mean as a heatmap.

    Reads training_results/torch_train/attentions.pkl.gz (a gzipped pickle
    of attention matrices), averages over the first two axes and renders
    the result with a colorbar.
    """
    print('Loading attentions from pickle file')
    with gzip.open(
        os.path.join('training_results', 'torch_train', 'attentions.pkl.gz'),
        'r') as att_file:
        attentions = pickle.load(att_file)
    # Set up figure with colorbar.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(np.mean(np.array(attentions), axis=(0, 1)), cmap='bone')
    fig.colorbar(cax)
    plt.show()
# Executed at import time: renders the attention heatmap immediately.
show_attention()
|
yoyo2k/l10n-romania | l10n_ro_invoice_report/__init__.py | Python | agpl-3.0 | 1,102 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 Deltatech All Rights Reserved
# Dorin Hongu <dhongu(@)gmail(.)com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
#     You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
##############################################################################
import account_invoice
import company
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sonium0/pymatgen | pymatgen/core/tests/test_surface.py | Python | mit | 13,101 | 0.00084 | #!/usr/bin/python
import unittest
import os
import random
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.core.surface import Slab, SlabGenerator, generate_all_slabs, \
get_symmetrically_distinct_miller_indices
from pymatgen.symmetry.groups import SpaceGroup
from pymatgen.util.testing import PymatgenTest
def get_path(path_str):
    """Return the path of *path_str* inside the surface test-files folder."""
    here = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(
        here, "..", "..", "..", "test_files", "surface_tests", path_str)
class SlabTest(PymatgenTest):
    """Tests for the Slab class using a wurtzite ZnO (100) slab fixture."""

    def setUp(self):
        # 5 Angstrom slab / 5 Angstrom vacuum ZnO (100) slab, plus two
        # simple cubic structures used by other tests.
        zno1 = Structure.from_file(get_path("ZnO-wz.cif"), primitive=False)
        zno55 = SlabGenerator(zno1, [1, 0, 0], 5, 5, lll_reduce=False,
                              center_slab=False).get_slab()
        self.zno1 = zno1
        self.zno55 = zno55
        self.h = Structure(Lattice.cubic(3), ["H"],
                           [[0, 0, 0]])
        self.libcc = Structure(Lattice.cubic(3.51004), ["Li", "Li"],
                               [[0, 0, 0], [0.5, 0.5, 0.5]])

    def test_init(self):
        # Rebuilding a Slab from the fixture's own attributes must preserve
        # surface area, lattice, oriented cell composition and site count.
        zno_slab = Slab(self.zno55.lattice, self.zno55.species,
                        self.zno55.frac_coords,
                        self.zno55.miller_index,
                        self.zno55.oriented_unit_cell,
                        0, self.zno55.scale_factor)
        m =self.zno55.lattice.matrix
        area = np.linalg.norm(np.cross(m[0], m[1]))
        self.assertAlmostEqual(zno_slab.surface_area, area)
        self.assertEqual(zno_slab.lattice.lengths_and_angles,
                         self.zno55.lattice.lengths_and_angles)
        self.assertEqual(zno_slab.oriented_unit_cell.composition,
                         self.zno1.composition)
        self.assertEqual(len(zno_slab), 8)

    def test_add_adsorbate_atom(self):
        zno_slab = Slab(self.zno55.lattice, self.zno55.species,
                        self.zno55.frac_coords,
                        self.zno55.miller_index,
                        self.zno55.oriented_unit_cell,
                        0, self.zno55.scale_factor)
        # Adsorb H 1 Angstrom above site 1; the new site must sit higher
        # (larger c) than the slab bottom and not change the surface area.
        zno_slab.add_adsorbate_atom([1], 'H', 1)
        self.assertEqual(len(zno_slab), 9)
        self.assertEqual(str(zno_slab[8].specie), 'H')
        self.assertAlmostEqual(zno_slab.get_distance(1, 8), 1.0)
        self.assertTrue(zno_slab[8].c > zno_slab[0].c)
        m = self.zno55.lattice.matrix
        area = np.linalg.norm(np.cross(m[0], m[1]))
        self.assertAlmostEqual(zno_slab.surface_area, area)
        self.assertEqual(zno_slab.lattice.lengths_and_angles,
                         self.zno55.lattice.lengths_and_angles)

    def test_get_sorted_structure(self):
        species = [str(site.specie) for site in
                   self.zno55.get_sorted_structure()]
        self.assertEqual(species, ["Zn2+"] * 4 + ["O2-"] * 4)

    def test_methods(self):
        #Test various structure methods
        self.zno55.get_primitive_structure()

    def test_as_from_dict(self):
        # Round-trip through the dict serialization.
        d = self.zno55.as_dict()
        obj = Slab.from_dict(d)
        self.assertEqual(obj.miller_index, (1, 0, 0))

    def test_dipole_and_is_polar(self):
        # ZnO (100) is non-polar; an oxidation-decorated CsCl (100) slab
        # has a net dipole and must be reported polar.
        self.assertArrayAlmostEqual(self.zno55.dipole, [0, 0, 0])
        self.assertFalse(self.zno55.is_polar())
        cscl = self.get_structure("CsCl")
        cscl.add_oxidation_state_by_element({"Cs": 1, "Cl": -1})
        slab = SlabGenerator(cscl, [1, 0, 0], 5, 5,
                             lll_reduce=False, center_slab=False).get_slab()
        self.assertArrayAlmostEqual(slab.dipole, [-4.209, 0, 0])
        self.assertTrue(slab.is_polar())
class SlabGeneratorTest(PymatgenTest):
    def test_get_slab(self):
        # Slab thickness should include the vacuum (10 + 10 plus the
        # oriented cell) for LiFePO4 (001) with a 0.25 shift.
        s = self.get_structure("LiFePO4")
        gen = SlabGenerator(s, [0, 0, 1], 10, 10)
        s = gen.get_slab(0.25)
        self.assertAlmostEqual(s.lattice.abc[2], 20.820740000000001)
        fcc = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"],
                                        [[0, 0, 0]])
        gen = SlabGenerator(fcc, [1, 1, 1], 10, 10)
        slab = gen.get_slab()
        gen = SlabGenerator(fcc, [1, 1, 1], 10, 10, primitive=False)
        slab_non_prim = gen.get_slab()
        self.assertEqual(len(slab), 6)
        # The conventional fcc cell holds 4x the atoms of the primitive one.
        self.assertEqual(len(slab_non_prim), len(slab) * 4)
        #Some randomized testing of cell vectors
        # NOTE(review): the loop variable is immediately overwritten with a
        # random space-group number, so this runs 230 random trials rather
        # than iterating each group once -- presumably intentional sampling.
        for i in range(1, 231):
            i = random.randint(1, 230)
            sg = SpaceGroup.from_int_number(i)
            if sg.crystal_system == "hexagonal" or (sg.crystal_system == \
                    "trigonal" and sg.symbol.endswith("H")):
                latt = Lattice.hexagonal(5, 10)
            else:
                #Cubic lattice is compatible with all other space groups.
                latt = Lattice.cubic(5)
            s = Structure.from_spacegroup(i, latt, ["H"], [[0, 0, 0]])
            miller = (0, 0, 0)
            while miller == (0, 0, 0):
                miller = (random.randint(0, 6), random.randint(0, 6),
                          random.randint(0, 6))
            gen = SlabGenerator(s, miller, 10, 10)
            # The first two lattice vectors of the oriented cell must lie in
            # the surface plane (perpendicular to the slab normal).
            a, b, c = gen.oriented_unit_cell.lattice.matrix
            self.assertAlmostEqual(np.dot(a, gen._normal), 0)
            self.assertAlmostEqual(np.dot(b, gen._normal), 0)
def test_normal_search(self):
fcc = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"],
[[0, 0, 0]])
for miller in [(1, 0, 0), (1, 1, 0), (1, 1, 1), (2, 1, 1)]:
gen = SlabGenerator(fcc, miller, 10, 10)
gen_normal = SlabGenerator(fcc, miller, 10, 10,
max_normal_search=max(miller))
slab = gen_normal.get_slab()
self.assertAlmostEqual(slab.lattice.alpha, 90)
self.assertAlmostEqual(slab.lattice.beta, 90)
self.assertGreaterEqual(len(gen_normal.oriented_unit_cell),
len(gen.oriented_unit_cell))
graphite = self.get_structure("Graphite")
for miller in [(1, 0, 0), (1, 1, 0), (0, 0, 1), (2, 1, 1)]:
gen = SlabGenerator(graphite, miller, 10, 10)
gen_normal = SlabGenerator(graphite, miller, 10, 10,
max_normal_search=max(miller))
self.assertG | reaterEqual(len(gen_normal.oriented_unit_cell),
len(gen.oriented_unit_cell))
sc = Structure(Lattice.hexagonal(3.32, 5.15), ["Sc", "Sc"],
| [[1/3, 2/3, 0.25], [2/3, 1/3, 0.75]])
gen = SlabGenerator(sc, (1, 1, 1), 10, 10, max_normal_search=1)
self.assertAlmostEqual(gen.oriented_unit_cell.lattice.angles[1], 90)
def test_get_slabs(self):
gen = SlabGenerator(self.get_structure("CsCl"), [0, 0, 1], 10, 10)
#Test orthogonality of some internal variables.
a, b, c = gen.oriented_unit_cell.lattice.matrix
self.assertAlmostEqual(np.dot(a, gen._normal), 0)
self.assertAlmostEqual(np.dot(b, gen._normal), 0)
self.assertEqual(len(gen.get_slabs()), 1)
s = self.get_structure("LiFePO4")
gen = SlabGenerator(s, [0, 0, 1], 10, 10)
self.assertEqual(len(gen.get_slabs()), 5)
self.assertEqual(len(gen.get_slabs(bonds={("P", "O"): 3})), 2)
# There are no slabs in LFP that does not break either P-O or Fe-O
# bonds for a miller index of [0, 0, 1].
self.assertEqual(len(gen.get_slabs(
bonds={("P", "O"): 3, ("Fe", "O"): 3})), 0)
#If we allow some broken bonds, there are a few slabs.
self.assertEqual(len(gen.get_slabs(
bonds={("P", "O"): 3, ("Fe", "O"): 3},
max_broken_bonds=2)), 2)
# At this threshold, only the origin and center Li results in
# clustering. All other sites are non-clustered. So the of
# slabs is of sites in LiFePO4 unit cell - 2 + 1.
self.assertEqual(len(gen.get_slabs(tol=1e-4)), 15)
LiCoO2 = Structure.from_file(get_path("icsd_LiCoO2.cif"),
primitive=False)
|
dozymoe/fireh_runner | modules/odoo_test.py | Python | mit | 7,100 | 0.003239 | """ Odoo Test module.
Odoo is an OpenERP framework.
A copy of odoo.py with added features that eases testing.
Website: http://www.odoo.com
"""
import logging
import os
import subprocess
import sys
# set server timezone in UTC before time module imported
os.environ['TZ'] = 'UTC'
# NOTE(review): SHELL_TIMEOUT and the two env-var names below are not
# referenced in this module; their names suggest they configure the spawned
# sub-process -- confirm against odoo_test_.py before relying on them.
SHELL_TIMEOUT = None
SHELL_ENV_QUIET = 'RUNNER_SUBPROCESS_ARG_QUIET'
SHELL_ENV_WITH_SERVER = 'RUNNER_SUBPROCESS_ARGS_WITH_SERVER'
logging.basicConfig(level=logging.INFO)
# Module-level logger used by the upgrade sub-command.
_logger = logging.getLogger(__name__)
def _get_realfile():
path = os.path.abspath(__file__)
return os.path.splitext(path)[0] + '_.p | y'
def _setup_environment(loader, project, variant):
    """Shared setup for every odoo-test sub-command.

    Returns (config, python_bin, config_file, work_dir); ``config_file`` is
    None when the ODOO_CONFIG_FILE environment variable is unset.
    """
    project, variant = loader.setup_project_env(project, variant)
    loader.setup_virtualenv()
    loader.setup_shell_env()
    config = loader.get_project_config()
    python_bin = loader.get_python_bin()
    config_file = os.environ.get('ODOO_CONFIG_FILE')
    work_dir = loader.expand_path(config.get('work_dir', project))
    return config, python_bin, config_file, work_dir


def _config_option(loader, config_file):
    """Return ['--config=<abs path>'] for the config file, or [] if unset."""
    if config_file:
        return ['--config=' + os.path.join(loader.config['work_dir'],
                                           config_file)]
    return []


def _exec_odoo(loader, project, variant, subcommand, args,
               use_shell_env=False):
    """Replace the current process with the companion runner script.

    bugfix: sub-commands such as 'shell' must be mentioned on the command
    line before --config, so the option is appended after all other args.
    """
    config, python_bin, config_file, work_dir = _setup_environment(
        loader, project, variant)
    if use_shell_env:
        # The shell sub-command additionally exports the project's
        # configured shell environment.
        loader.setup_shell_env(config.get('shell_env', {}))
    binargs = [python_bin, _get_realfile()]
    if subcommand:
        binargs.append(subcommand)
    binargs.extend(args)
    binargs.extend(_config_option(loader, config_file))
    os.chdir(work_dir)
    os.execvp(binargs[0], binargs)


def odoo_test(loader, project=None, variant='testing', *args): #pylint:disable=keyword-arg-before-vararg
    """Run the odoo test runner with the raw command-line arguments."""
    _exec_odoo(loader, project, variant, None, list(args))


def odoo_test_cleardb(loader, project=None, variant='testing', *args): #pylint:disable=keyword-arg-before-vararg
    """Run the 'cleardb' sub-command of the odoo test runner."""
    _exec_odoo(loader, project, variant, 'cleardb', list(args))


def odoo_test_shell(loader, project=None, variant='testing', *args): #pylint:disable=keyword-arg-before-vararg
    """Run the 'shell' sub-command with the project's shell environment."""
    _exec_odoo(loader, project, variant, 'shell', list(args),
               use_shell_env=True)


def odoo_test_install(loader, project=None, variant='testing', *args): #pylint:disable=keyword-arg-before-vararg
    """Run the 'install' sub-command of the odoo test runner."""
    _exec_odoo(loader, project, variant, 'install', list(args))


def odoo_test_uninstall(loader, project=None, variant='testing', *args): #pylint:disable=keyword-arg-before-vararg
    """Run the 'uninstall' sub-command of the odoo test runner."""
    _exec_odoo(loader, project, variant, 'uninstall', list(args))


def odoo_test_upgrade(loader, project=None, variant='testing', *args): #pylint:disable=keyword-arg-before-vararg
    """Upgrade the named modules one at a time.

    Option arguments (starting with '-') are forwarded to every invocation;
    the remaining arguments are module names.  Exits with the first failing
    sub-process return code, or 0 when all upgrades succeed.
    """
    _, python_bin, config_file, work_dir = _setup_environment(
        loader, project, variant)
    binargs = [python_bin, _get_realfile(), 'upgrade']
    binargs.extend(_config_option(loader, config_file))
    binargs.extend(arg for arg in args if arg.startswith('-'))
    os.chdir(work_dir)
    for mod in args:
        if mod.startswith('-'):
            continue
        _logger.info("Upgrading module '%s'.", mod)
        ret = subprocess.call(binargs + [mod])
        if ret:
            sys.exit(ret)
    sys.exit(0)


def odoo_test_list_installed(loader, project=None, variant='testing', *args): #pylint:disable=keyword-arg-before-vararg
    """Run the 'list-installed' sub-command of the odoo test runner."""
    _exec_odoo(loader, project, variant, 'list-installed', list(args))


commands = (odoo_test, odoo_test_cleardb, odoo_test_install,
            odoo_test_uninstall, odoo_test_upgrade, odoo_test_shell,
            odoo_test_list_installed)
|
rh-s/heat | heat/tests/test_glance_image.py | Python | apache-2.0 | 7,257 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient import exc as glance_exceptions
import mock
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.openstack.glance import glance_image as gi
from heat.engine import stack as parser
from heat.engine import template
from heat.tests import common
from heat.tests import utils
image_template = '''
heat_template_version: 2013-05-23
description: This template to define a glance image.
resources:
my_image:
type: OS::Glance::Image
properties:
name: cirros_image
id: 41f0e60c-ebb4-4375-a2b4-845ae8b9c995
disk_format: qcow2
container_format: bare
is_public: True
min_disk: 10
min_ram: 512
protected: False
location: https://launchpad.net/cirros/cirros-0.3.0-x86_64-disk.img
'''
image_template_validate = '''
heat_template_version: 2013-05-23
description: This template to define a glance image.
resources:
image:
type: OS::Glance::Image
properties:
name: image_validate
disk_format: qcow2
container_format: bare
location: https://launchpad.net/cirros/cirros-0.3.0-x86_64-disk.img
'''
class GlanceImageTest(common.HeatTestCase):
    """Validation and lifecycle tests for the OS::Glance::Image resource."""

    def setUp(self):
        super(GlanceImageTest, self).setUp()
        utils.setup_dummy_db()
        self.ctx = utils.dummy_context()
        tpl = template_format.parse(image_template)
        self.stack = parser.Stack(
            self.ctx, 'glance_image_test_stack',
            template.Template(tpl)
        )
        self.my_image = self.stack['my_image']
        # Stub the glance client plugin so no real API calls are made.
        glance = mock.MagicMock()
        self.glanceclient = mock.MagicMock()
        self.my_image.glance = glance
        glance.return_value = self.glanceclient
        self.images = self.glanceclient.images

    def _validation_image(self):
        """Return the image resource of a fresh stack built from
        ``image_template_validate``.

        A new stack per call so each test can mutate or remove properties
        independently (replaces six duplicated construction stanzas).
        """
        tpl = template_format.parse(image_template_validate)
        stack = parser.Stack(
            self.ctx, 'glance_image_stack_validate',
            template.Template(tpl)
        )
        return stack['image']

    def _test_validate(self, resource, error_msg):
        """Assert that validating *resource* fails mentioning *error_msg*."""
        exc = self.assertRaises(exception.StackValidationFailed,
                                resource.validate)
        self.assertIn(error_msg, six.text_type(exc))

    def test_resource_mapping(self):
        mapping = gi.resource_mapping()
        self.assertEqual(1, len(mapping))
        self.assertEqual(gi.GlanceImage, mapping['OS::Glance::Image'])
        self.assertIsInstance(self.my_image, gi.GlanceImage)

    def test_invalid_min_disk(self):
        # 'min_disk' must be >= 0.
        image = self._validation_image()
        image.t['Properties']['min_disk'] = -1
        error_msg = ('Property error: resources.image.properties.min_disk: '
                     '-1 is out of range (min: 0, max: None)')
        self._test_validate(image, error_msg)

    def test_invalid_min_ram(self):
        # 'min_ram' must be >= 0.
        image = self._validation_image()
        image.t['Properties']['min_ram'] = -1
        error_msg = ('Property error: resources.image.properties.min_ram: '
                     '-1 is out of range (min: 0, max: None)')
        self._test_validate(image, error_msg)

    def test_miss_disk_format(self):
        # 'disk_format' is required.
        image = self._validation_image()
        image.t['Properties'].pop('disk_format')
        error_msg = 'Property disk_format not assigned'
        self._test_validate(image, error_msg)

    def test_invalid_disk_format(self):
        # 'disk_format' must be one of the allowed glance values.
        image = self._validation_image()
        image.t['Properties']['disk_format'] = 'incorrect_format'
        error_msg = ('Property error: '
                     'resources.image.properties.disk_format: '
                     '"incorrect_format" is not an allowed value '
                     '[ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, iso]')
        self._test_validate(image, error_msg)

    def test_miss_container_format(self):
        # 'container_format' is required.
        image = self._validation_image()
        image.t['Properties'].pop('container_format')
        error_msg = 'Property container_format not assigned'
        self._test_validate(image, error_msg)

    def test_invalid_container_format(self):
        # 'container_format' must be one of the allowed glance values.
        image = self._validation_image()
        image.t['Properties']['container_format'] = 'incorrect_format'
        error_msg = ('Property error: '
                     'resources.image.properties.container_format: '
                     '"incorrect_format" is not an allowed value '
                     '[ami, ari, aki, bare, ova, ovf]')
        self._test_validate(image, error_msg)

    def test_miss_location(self):
        # 'location' is required.
        image = self._validation_image()
        image.t['Properties'].pop('location')
        error_msg = 'Property location not assigned'
        self._test_validate(image, error_msg)

    def test_image_handle_create(self):
        value = mock.MagicMock()
        image_id = '41f0e60c-ebb4-4375-a2b4-845ae8b9c995'
        value.id = image_id
        self.images.create.return_value = value
        self.my_image.handle_create()
        self.assertEqual(image_id, self.my_image.resource_id)

    def test_image_handle_delete(self):
        # No resource_id yet -> delete is a no-op.
        # (Fixed: the original set ``self.resource_id`` on the *test case*
        # instead of on the resource under test.)
        self.my_image.resource_id = None
        self.assertIsNone(self.my_image.handle_delete())
        image_id = '41f0e60c-ebb4-4375-a2b4-845ae8b9c995'
        self.my_image.resource_id = image_id
        self.images.delete.return_value = None
        self.assertIsNone(self.my_image.handle_delete())
        # A 404 from glance means the image is already gone; treated as success.
        self.images.delete.side_effect = glance_exceptions.HTTPNotFound(404)
        self.assertIsNone(self.my_image.handle_delete())
|
grimoirelab/GrimoireELK | utils/gelk.py | Python | gpl-3.0 | 5,856 | 0.000854 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2019 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Alvaro del Castillo San Felix <acs@bitergia.com>
#
import argparse
import logging
from datetime import datetime
from os import sys
from perceval.backends.bugzilla import Bugzilla
from perceval.backends.gerrit import Gerrit
from perceval.backends.github import GitHub
from grimoire_elk.elastic import ElasticConnectException
from grimoire_elk.elastic import ElasticSearch
from grimoire_elk.enriched.bugzilla import BugzillaEnrich
from grimoire_elk.enriched.gerrit import GerritEnrich
from grimoire_elk.enriched.github import GitHubEnrich
from grimoire_elk.enriched.sortinghat_gelk import SortingHat
from grimoire_elk.raw.bugzilla import BugzillaOcean
from grimoire_elk.raw.elastic import ElasticOcean
from grimoire_elk.raw.gerrit import GerritOcean
from grimoire_elk.raw.github import GitHubOcean
def get_connector_from_name(name, connectors):
    """Return the connector triple whose perceval backend is named *name*.

    :param name: backend name, as reported by ``backend.get_name()``
    :param connectors: iterable of ``[backend, ocean, enrich]`` triples
    :returns: the first matching triple, or ``None`` if no backend matches

    The original kept scanning after a hit (returning the *last* match);
    returning early on the first match avoids the useless extra iterations
    and matches how the registry is actually used (unique backend names).
    """
    for connector in connectors:
        if connector[0].get_name() == name:
            return connector
    return None
if __name__ == '__main__':
    """Gelk: perceval2ocean and ocean2kibana"""
    # Static backend registry: [perceval backend, raw (Ocean), enriched] triples.
    connectors = [[Bugzilla, BugzillaOcean, BugzillaEnrich],
                  [GitHub, GitHubOcean, GitHubEnrich],
                  [Gerrit, GerritOcean, GerritEnrich]]  # Will come from Registry
    # Build the CLI: shared ElasticSearch params plus one subparser per backend.
    parser = argparse.ArgumentParser()
    ElasticOcean.add_params(parser)
    subparsers = parser.add_subparsers(dest='backend',
                                      help='perceval backend')
    for connector in connectors:
        name = connector[0].get_name()
        subparser = subparsers.add_parser(name, help='gelk %s -h' % name)
        # We need params for feed
        connector[0].add_params(subparser)
    args = parser.parse_args()
    app_init = datetime.now()
    backend_name = args.backend
    if not backend_name:
        parser.print_help()
        sys.exit(0)
    if 'debug' in args and args.debug:
        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
        logging.debug("Debug mode activated")
    else:
        logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
    # Quiet down noisy HTTP client loggers.
    logging.getLogger("urllib3").setLevel(logging.WARNING)
    logging.getLogger("requests").setLevel(logging.WARNING)
    # Instantiate the backend and its raw/enriched wrappers from CLI args.
    connector = get_connector_from_name(backend_name, connectors)
    backend = connector[0](**vars(args))
    ocean_backend = connector[1](backend, **vars(args))
    enrich_backend = connector[2](backend, **vars(args))
    es_index = backend.get_name() + "_" + backend.get_id()
    # --cache forces a clean (non-incremental) run.
    clean = args.no_incremental
    if args.cache:
        clean = True
    try:
        # Ocean
        elastic_state = ElasticSearch(args.elastic_url,
                                      es_index,
                                      ocean_backend.get_elastic_mappings(),
                                      clean)
        # Enriched ocean
        enrich_index = es_index + "_enrich"
        elastic = ElasticSearch(args.elastic_url,
                                enrich_index,
                                enrich_backend.get_elastic_mappings(),
                                clean)
    except ElasticConnectException:
        logging.error("Can't connect to Elastic Search. Is it running?")
        sys.exit(1)
    ocean_backend.set_elastic(elastic_state)
    enrich_backend.set_elastic(elastic)
    try:
        # First feed the item in Ocean to use it later
        logging.info("Adding data to %s" % (ocean_backend.elastic.index_url))
        ocean_backend.feed()
        if backend_name == "github":
            GitHub.users = enrich_backend.users_from_es()
        logging.info("Adding enrichment data to %s" %
                     (enrich_backend.elastic.index_url))
        # Enrich in bulks of elastic.max_items_bulk items, collecting the
        # identities found along the way for SortingHat.
        items = []
        new_identities = []
        items_count = 0
        for item in ocean_backend:
            # print("%s %s" % (item['url'], item['lastUpdated_date']))
            if len(items) >= elastic.max_items_bulk:
                enrich_backend.enrich_items(items)
                items = []
            items.append(item)
            # Get identities from new items to be added to SortingHat
            identities = ocean_backend.get_identities(item)
            if not identities:
                identities = []
            for identity in identities:
                if identity not in new_identities:
                    new_identities.append(identity)
            items_count += 1
        # Flush the final partial bulk.
        enrich_backend.enrich_items(items)
        logging.info("Total items enriched %i " % items_count)
        logging.info("Total new identities to be checked %i" % len(new_identities))
        merged_identities = SortingHat.add_identities(new_identities, backend_name)
        # Redo enrich for items with new merged identities
    except KeyboardInterrupt:
        logging.info("\n\nReceived Ctrl-C or other break signal. Exiting.\n")
        logging.debug("Recovering cache")
        backend.cache.recover()
        sys.exit(0)
    total_time_min = (datetime.now() - app_init).total_seconds() / 60
    logging.info("Finished in %.2f min" % (total_time_min))
|
vivaxy/algorithms | python/problems/add_binary.py | Python | mit | 1,091 | 0.000917 | """
https://leetcode.com/problems/add-binary/
https://leetcode.com/submissions/detail/106524947/
"""
class Solution(object):
    """LeetCode 67 "Add Binary": sum of two binary strings."""

    def addBinary(self, a, b):
        """Return the binary sum of the binary strings *a* and *b*.

        :type a: str
        :type b: str
        :rtype: str
        """
        # Walk both strings right-to-left carrying as we go. Bits are
        # collected in a list and joined once at the end — the original
        # prepended to a string each iteration, which is O(n^2).
        bits = []
        i, j, carry = len(a) - 1, len(b) - 1, 0
        while i >= 0 or j >= 0 or carry:
            total = carry
            if i >= 0:
                total += int(a[i])
                i -= 1
            if j >= 0:
                total += int(b[j])
                j -= 1
            carry, bit = divmod(total, 2)
            bits.append(str(bit))
        # Two empty inputs sum to '0' (the original returned '').
        return ''.join(reversed(bits)) or '0'
import unittest
class Test(unittest.TestCase):
    """Regression check for Solution.addBinary."""

    def test(self):
        self.assertEqual('100', Solution().addBinary('11', '1'))


if __name__ == '__main__':
    unittest.main()
|
daringer/schemmaker | src/force_optimizer/group.py | Python | apache-2.0 | 7,382 | 0.004741 | '''
Created on 23.08.2014
@author: christian auth
'''
from block import Block
class Group:
    """A cluster of circuit blocks used by the force-directed optimizer.

    Tracks the group hierarchy (parent/children), the blocks it contains,
    its frame (size/position) and its neighborhood, both sorted by compass
    direction and as unsorted/extern lists.

    Fix: ``are_neighbor`` read ``self.neigbor_unsorted`` (typo), raising
    AttributeError whenever the unsorted branch was reached.
    """

    def __init__(self, group_id):
        """Create an empty group identified by *group_id*."""
        #set the ID of the group
        self.group_id = group_id
        #set the parent group
        self.parent = None
        self.childs = []
        self.is_bias = False
        self.is_bias_connected = False
        #Frame size and origin
        self.size_width = 0
        self.size_height = 0
        self.position_x = 0
        self.position_y = 0
        #Lists includes the groups in the neighborhood relative to their position
        self.neighbor_north = []
        self.neighbor_south = []
        self.neighbor_west = []
        self.neighbor_east = []
        #List with all neighbor-groups, which are not sorted in a list from above
        self.neighbor_unsorted = []
        #Lists includes the neighbors which are not in the parent group
        self.neighbor_north_extern = []
        self.neighbor_south_extern = []
        self.neighbor_west_extern = []
        self.neighbor_east_extern = []
        self.neighbor_extern = []
        # Lists includes childs with connection to the neighbor of the group
        self.child_north = []
        self.child_south = []
        self.child_west = []
        self.child_east = []
        self.childs_east_sorted = []
        #Dictionary to count the connections between the groups
        self.neighbors = {}
        #List with all elements in this group
        self.blocks = set()
        self.block_north = set()
        self.block_south = set()
        self.block_west = set()
        self.block_east = set()
        #
        self.distance_to_out = 0
        #flags
        self.connected_vcc = 0
        self.connected_gnd = 0
        self.connected_out = 0
        self.connected_inp = 0
        self.wide_search_flag = 0 # 0:not discover, 1: discover, 2: visited
        self.connected_parent_east = 0
        self.connected_parent_north = 0
        self.connected_parent_south = 0
        self.connected_parent_west = 0
        self.listfull_north = False
        self.listfull_south = False
        self.listfull_east = False
        self.listfull_west = False

    def add_neighbor(self, neighbor, block):
        """Register *block* as a connection to *neighbor*.

        Also files *neighbor* under the unsorted list (same parent) or the
        extern list (different parent) on first sight.
        """
        if neighbor in self.neighbors.keys():
            self.neighbors[neighbor].append(block)
        else:
            self.neighbors[neighbor] = [block]
            if neighbor.parent is self.parent:
                self.neighbor_unsorted.append(neighbor)
            else:
                self.neighbor_extern.append(neighbor)

    def add_block(self, block):
        """Add *block* to this group's block set (idempotent: it is a set)."""
        self.blocks.add(block)

    def add_child(self, child):
        """Append *child* to the children list, ignoring duplicates."""
        if self.childs.count(child) == 0:
            self.childs.append(child)

    def __str__(self):
        """Render a boxed, human-readable summary of the group for debugging."""
        nl = "\n"
        less_padding = 16
        padding = 20
        more_padding = 24
        # header (group_id + size + pos)
        o = "" + nl
        o += "+------------------------------------------" + nl
        o += "| {}: {} - Size: {}x{} - Pos: {}x{}{}".format("Group", self.group_id,
                                                            self.size_width, self.size_height,
                                                            self.position_x, self.position_y, nl, pad=less_padding)
        o += "+------------------------------------------" + nl
        #show is bias
        if self.is_bias:
            o += "|{:>{pad}} {}".format("BIAS", nl, pad=padding)
        # show parent
        if self.parent is not None:
            o += "|{:>{pad}}: {}{}".format("Parent", self.parent.group_id, nl, pad=padding)
        # list children
        children = []
        for child in self.childs:
            children.append(child.group_id)
        o += "|{:>{pad}}: {}{}".format("Children", children, nl, pad=padding)
        o += "|{:>{pad}}: {}{}".format("Blocks", ", ".join(b.name for b in self.blocks), nl, pad=padding)
        # list all blocks
        n_types = (("EAST", self.block_east),
                   ("WEST", self.block_west),
                   ("NORTH", self.block_north),
                   ("SOUTH", self.block_south))
        for direction, data in n_types:
            if len(data) > 0:
                o += "|{:>{pad}}: {}{}".format(
                    direction, ", ".join(str(n.name) for n in data), nl, pad=more_padding)
        # connected to which ports
        c_types = (("OUT", self.connected_out),
                   ("VDD", self.connected_vcc),
                   ("GND", self.connected_gnd))
        _c = ["{}: {}".format(name, num) for name, num in c_types if num]
        o += "|{:>{pad}}: {} {}".format("Connected to", ", ".join(_c), nl, pad=padding)
        # neighbor count
        o += "|{:>{pad}}: ".format("Neighbors", pad=padding)
        if len(self.neighbors) > 0:
            o += ", ".join(("{}x {}".format(len(v), k.group_id)) for k, v in self.neighbors.items())
        o += nl
        # list all neighbors
        n_types = (("EAST", self.neighbor_east),
                   ("WEST", self.neighbor_west),
                   ("NORTH", self.neighbor_north),
                   ("SOUTH", self.neighbor_south))
        for direction, data in n_types:
            if len(data) > 0:
                o += "|{:>{pad}}: {}{}".format(
                    direction, ", ".join(str(n.group_id) for n in data), nl, pad=more_padding)
        # show unsorted neighbors
        o += "|{:>{pad}}: ".format("Unsorted Neighbors", pad=padding)
        o += "{}{}".format("", ", ".join(str(n.group_id) for n in self.neighbor_unsorted), nl, pad=padding)
        o += nl
        # show extern neighbors
        o += "|{:>{pad}}: ".format("Extern Neighbors", pad=padding)
        o += "{}{}".format("", ", ".join(str(n.group_id) for n in self.neighbor_extern), nl, pad=padding)
        o += nl
        # show parent's neighbor conns
        p_con_type = (("EAST", self.connected_parent_east),
                      ("WEST", self.connected_parent_west),
                      ("NORTH", self.connected_parent_north),
                      ("SOUTH", self.connected_parent_south))
        o += "|{:>{pad}} {}".format("Parent's neighbor", nl, pad=padding)
        o += "|{:>{pad}}: {}{}".format("connections",
                                       ", ".join(("{}:{}".format(key, data)) \
                                                 for key, data in p_con_type if data), nl, pad=padding)
        # footer
        o += "+------------------------------------------" + nl
        o += nl
        return o

    def are_neighbor(self, group):
        '''
        function searches if an other group is the neighbor of this group
        Parameter return:  0: NORTH
                           1: SOUTH
                           2: EAST
                           3: WEST
                           4: Unsorted
                          -1: NO neighbor
        '''
        # ``in`` performs the same == comparison as the original loops.
        if group in self.neighbor_north:
            return 0 # means NORTH
        if group in self.neighbor_south:
            return 1 # means SOUTH
        if group in self.neighbor_east:
            return 2 # means EAST
        if group in self.neighbor_west:
            return 3 # means WEST
        # Fixed: the original read ``self.neigbor_unsorted`` (missing 'h')
        # and raised AttributeError whenever this branch was reached.
        if group in self.neighbor_unsorted:
            return 4 # means Unsorted
        return -1
|
Fabfm4/Sita-BackEnd | src/sita/payments/migrations/0001_initial.py | Python | apache-2.0 | 1,397 | 0.003579 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-05-22 03:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration of the payments app: creates the
    # Payment table (conekta references, card summary, amount/currency).
    # Do not hand-edit the field order — it determines column order.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Payment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True, null=True, verbose_name='created date')),
                ('last_modified', models.DateTimeField(auto_now=True, null=True, verbose_name='last modified')),
                ('conekta_id', models.BinaryField()),
                ('card_last_four', models.CharField(max_length=4)),
                ('card_brand', models.CharField(choices=[(b'VISA', b'VISA'), (b'MASTERCARD', b'MASTERCARD'), (b'AMEX', b'AMEX')], max_length=10)),
                ('amount', models.DecimalField(decimal_places=2, max_digits=10)),
                ('description', models.TextField()),
                ('reference_id_conekta', models.BinaryField()),
                ('currency', models.CharField(max_length=10)),
                ('title_subscription', models.CharField(max_length=254)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
altanawealth/django-report-builder | report_builder_demo/demo_models/migrations/0004_waiter_days_worked.py | Python | bsd-3-clause | 434 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds the optional ``days_worked`` integer column to the Waiter model.
    dependencies = [
        ('demo_models', '0003_auto_20150419_2110'),
    ]
    operations = [
        migrations.AddField(
            model_name='waiter',
            name='days_worked',
            field=models.IntegerField(default=None, null=True, blank=True),
        ),
    ]
| |
Alex-Ian-Hamilton/sunpy | sunpy/spectra/tests/test_callisto.py | Python | bsd-2-clause | 10,406 | 0.000961 | # -*- coding: utf-8 -*-
# Author: Florian Mayer <florian.mayer@bitsrc.org>
from __future__ import absolute_import
import shutil
from tempfile import mkdtemp
from datetime import datetime
import pytest
import os
import glob
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
import sunpy.data.test
from sunpy.spectra.sources.callisto import (
CallistoSpectrogram, query, download, minimal_pairs
)
@pytest.fixture
def CALLISTO_IMAGE():
    # Path to the bundled Callisto FITS sample used throughout this module.
    testpath = sunpy.data.test.rootdir
    return os.path.join(testpath, 'BIR_20110922_050000_01.fit')
@pytest.fixture
def CALLISTO_IMAGE_GLOB_KEY():
    # Glob pattern matching the Birr ("BIR") test files.
    return 'BIR_*'
@pytest.fixture
def CALLISTO_IMAGE_GLOB_INDEX(CALLISTO_IMAGE, CALLISTO_IMAGE_GLOB_KEY):
    # Index of CALLISTO_IMAGE inside the glob result; glob order is not
    # guaranteed, so the tests look the position up instead of assuming 0.
    testpath = sunpy.data.test.rootdir
    res = glob.glob(os.path.join(testpath, CALLISTO_IMAGE_GLOB_KEY))
    return res.index(CALLISTO_IMAGE)
def test_read(CALLISTO_IMAGE):
    # Header metadata of the sample file must round-trip through read().
    ca = CallistoSpectrogram.read(CALLISTO_IMAGE)
    assert ca.start == datetime(2011, 9, 22, 5, 0, 0, 454000)
    assert ca.t_init == 18000.0
    assert ca.shape == (200, 3600)
    assert ca.t_delt == 0.25
    # Test linearity of time axis.
    assert np.array_equal(
        ca.time_axis, np.linspace(0, 0.25 * (ca.shape[1] - 1), ca.shape[1])
    )
    assert ca.dtype == np.uint8
@pytest.mark.online
def test_query():
    # Queries the live i4ds archive; only runs with the 'online' marker.
    URL = 'http://soleil.i4ds.ch/solarradio/data/2002-20yy_Callisto/2011/09/22/'
    result = list(query(
        datetime(2011, 9, 22, 5), datetime(2011, 9, 22, 6), set(["BIR"])
    ))
    RESULTS = [
        "BIR_20110922_050000_01.fit.gz",
        "BIR_20110922_051500_01.fit.gz",
        "BIR_20110922_053000_01.fit.gz",
        "BIR_20110922_050000_03.fit.gz",
        "BIR_20110922_051500_03.fit.gz",
        "BIR_20110922_053000_03.fit.gz",
        "BIR_20110922_054500_03.fit.gz",
    ]
    RESULTS.sort()
    # Should be sorted anyway, but better to assume as little as possible.
    result.sort()
    for item in RESULTS:
        assert URL + item in result
@pytest.mark.online
@pytest.mark.xfail
def test_query_number():
    # Restricting the instrument tuple to ("BIR", 1) should only return
    # the _01 files; currently expected to fail (xfail).
    URL = 'http://soleil.i4ds.ch/solarradio/data/2002-20yy_Callisto/2011/09/22/'
    result = list(query(
        datetime(2011, 9, 22, 5), datetime(2011, 9, 22, 6), set([("BIR", 1)])
    ))
    RESULTS = [
        "BIR_20110922_050000_01.fit.gz",
        "BIR_20110922_051500_01.fit.gz",
        "BIR_20110922_053000_01.fit.gz",
    ]
    RESULTS.sort()
    # Should be sorted anyway, but better to assume as little as possible.
    result.sort()
    assert len(result) == len(RESULTS)
@pytest.mark.online
@pytest.mark.xfail
def test_download():
    # Downloads into a throwaway directory; always cleaned up via finally.
    directory = mkdtemp()
    try:
        result = query(
            datetime(2011, 9, 22, 5), datetime(2011, 9, 22, 6), set([("BIR", 1)])
        )
        RESULTS = [
            "BIR_20110922_050000_01.fit.gz",
            "BIR_20110922_051500_01.fit.gz",
            "BIR_20110922_053000_01.fit.gz",
        ]
        download(result, directory)
        for item in RESULTS:
            assert item in sorted(os.listdir(directory))
    finally:
        shutil.rmtree(directory)
def test_create_file(CALLISTO_IMAGE):
    # create() from a positional path must match read().
    ca = CallistoSpectrogram.create(CALLISTO_IMAGE)
    assert np.array_equal(ca.data, CallistoSpectrogram.read(CALLISTO_IMAGE).data)
def test_create_file_kw(CALLISTO_IMAGE):
    # Same as above, path given as the 'filename' keyword.
    ca = CallistoSpectrogram.create(filename=CALLISTO_IMAGE)
    assert np.array_equal(ca.data, CallistoSpectrogram.read(CALLISTO_IMAGE).data)
@pytest.mark.online
def test_create_url():
    URL = (
        "http://soleil.i4ds.ch/solarradio/data/2002-20yy_Callisto/2011/09/22/"
        "BIR_20110922_050000_01.fit.gz"
    )
    ca = CallistoSpectrogram.create(URL)
    assert np.array_equal(ca.data, CallistoSpectrogram.read(URL).data)
@pytest.mark.online
def test_create_url_kw():
    URL = (
        "http://soleil.i4ds.ch/solarradio/data/2002-20yy_Callisto/2011/09/22/"
        "BIR_20110922_050000_01.fit.gz"
    )
    ca = CallistoSpectrogram.create(url=URL)
    assert np.array_equal(ca.data, CallistoSpectrogram.read(URL).data)
def test_create_single_glob(CALLISTO_IMAGE, CALLISTO_IMAGE_GLOB_INDEX, CALLISTO_IMAGE_GLOB_KEY):
    # create() on a glob returns a list; index it via the fixture.
    PATTERN = os.path.join(os.path.dirname(CALLISTO_IMAGE), CALLISTO_IMAGE_GLOB_KEY)
    ca = CallistoSpectrogram.create(PATTERN)
    assert_allclose(ca[CALLISTO_IMAGE_GLOB_INDEX].data,
                    CallistoSpectrogram.read(CALLISTO_IMAGE).data)
# seems like this does not work anymore and can't figure out what it is for
#def test_create_single_glob_kw(CALLISTO_IMAGE):
#    PATTERN = os.path.join( os.path.dirname(CALLISTO_IMAGE), "BIR_*")
#    ca = CallistoSpectrogram.create(singlepattern=PATTERN)
#    assert np.array_equal(ca[0].data, CallistoSpectrogram.read(CALLISTO_IMAGE).data)
def test_create_glob_kw(CALLISTO_IMAGE, CALLISTO_IMAGE_GLOB_INDEX, CALLISTO_IMAGE_GLOB_KEY):
    PATTERN = os.path.join(
        os.path.dirname(CALLISTO_IMAGE),
        CALLISTO_IMAGE_GLOB_KEY
    )
    ca = CallistoSpectrogram.create(pattern=PATTERN)[CALLISTO_IMAGE_GLOB_INDEX]
    assert_allclose(ca.data, CallistoSpectrogram.read(CALLISTO_IMAGE).data)
def test_create_glob(CALLISTO_IMAGE_GLOB_KEY):
    # Two BIR_* sample files ship with sunpy.data.test.
    PATTERN = os.path.join(
        os.path.dirname(sunpy.data.test.__file__),
        CALLISTO_IMAGE_GLOB_KEY
    )
    ca = CallistoSpectrogram.create(PATTERN)
    assert len(ca) == 2
# NOTE(review): "commotative" below is a typo for "commutative"; kept because
# renaming would change the collected test id.
def test_minimum_pairs_commotative():
    # Swapping the argument order must swap each pair, with distances intact.
    A = [0, 1, 2]
    B = [1, 2, 3]
    first = list(minimal_pairs(A, B))
    assert first == [(b, a, d) for a, b, d in minimal_pairs(B, A)]
def test_minimum_pairs_end():
    assert (
        list(minimal_pairs([0, 1, 2, 4], [1, 2, 3, 4])) ==
        [(1, 0, 0), (2, 1, 0), (3, 3, 0)]
    )
def test_minimum_pairs_end_more():
    # A surplus element (8) on the first side is simply dropped.
    assert (
        list(minimal_pairs([0, 1, 2, 4, 8], [1, 2, 3, 4])) ==
        [(1, 0, 0), (2, 1, 0), (3, 3, 0)]
    )
def test_minimum_pairs_end_diff():
    # 8 vs 4 still pairs, but with distance 4.
    assert (
        list(minimal_pairs([0, 1, 2, 8], [1, 2, 3, 4])) ==
        [(1, 0, 0), (2, 1, 0), (3, 3, 4)]
    )
def test_closest():
    assert (
        list(minimal_pairs([50, 60], [0, 10, 20, 30, 40, 51, 52])) ==
        [(0, 5, 1), (1, 6, 8)]
    )
def _linear_test_spectrogram(data):
    """Build a single-channel CallistoSpectrogram over a one-hour axis.

    Factory for the homogenize tests; replaces four identical 15-argument
    constructor stanzas. Only *data* varies between tests.
    """
    return CallistoSpectrogram(
        data,
        np.arange(3600),
        np.array([1]),
        datetime(2011, 1, 1),
        datetime(2011, 1, 1, 1),
        0,
        1,
        'Time',
        'Frequency',
        'Test',
        None,
        None,
        None,
        False
    )
def test_homogenize_factor():
    # b = 2*a: _homogenize_params must recover the 0.5 scale factor.
    a = np.float64(np.random.randint(0, 255, 3600))[np.newaxis, :]
    c1 = _linear_test_spectrogram(a)
    b = 2 * a
    c2 = _linear_test_spectrogram(b)
    pairs_indices, factors, constants = c1._homogenize_params(
        c2, 0
    )
    assert pairs_indices == [(0, 0)]
    assert_array_almost_equal(factors, [0.5], 2)
    assert_array_almost_equal(constants, [0], 2)
    assert_array_almost_equal(factors[0] * b + constants[0], a)
def test_homogenize_constant():
    # b = a + 10: _homogenize_params must recover the -10 offset.
    a = np.float64(np.random.randint(0, 255, 3600))[np.newaxis, :]
    c1 = _linear_test_spectrogram(a)
    b = a + 10
    c2 = _linear_test_spectrogram(b)
    pairs_indices, factors, constants = c1._homogenize_params(
        c2, 0
    )
    assert pairs_indices == [(0, 0)]
    assert_array_almost_equal(factors, [1], 2)
    assert_array_almost_equal(constants, [-10], 2)
    assert_array_almost_equal(factors[0] * b + constants[0], a)
def test_homogenize_both():
a = np.float64(np.random.randint(0, 255, |
morrisonlevi/FrameworkBenchmarks | netty/setup.py | Python | bsd-3-clause | 860 | 0.012791 | import subprocess
import sys
import setup_util
import os
def start(args, logfile, errfile):
    """Build the netty example with Maven and launch the resulting jar.

    Output goes to *logfile*/*errfile*. Returns 0 on success, 1 if the
    Maven build fails (the launched server is not waited on).
    """
    try:
        subprocess.check_call("mvn clean compile assembly:single", shell=True, cwd="netty", stderr=errfile, stdout=logfile)
        # Fire-and-forget: the benchmark harness stops it later via stop().
        subprocess.Popen("java -jar netty-example-0.1-jar-with-dependencies.jar".rsplit(" "), cwd="netty/target", stderr=errfile, stdout=logfile)
        return 0
    except subprocess.CalledProcessError:
        return 1
def stop(logfile, errfile):
    """Kill any running netty-example server. Always returns 0.

    Windows: terminate via wmic. POSIX: scan ``ps aux`` output and SIGKILL
    every matching pid.
    NOTE(review): the POSIX branch compares str against ``out`` from a pipe,
    which is bytes under Python 3 — this code presumably targets Python 2;
    confirm before porting.
    """
    if os.name == 'nt':
        subprocess.check_call("wmic process where \"CommandLine LIKE '%netty-example%'\" call terminate", stderr=errfile, stdout=logfile)
    else:
        p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
        out, err = p.communicate()
        for line in out.splitlines():
            if 'netty-example' in line:
                pid = int(line.split(None, 2)[1])
                os.kill(pid, 9)  # SIGKILL (no graceful shutdown)
    return 0
| |
icereval/osf.io | addons/dropbox/views.py | Python | apache-2.0 | 1,287 | 0.003885 | """Views for the node settings page."""
# -*- coding: utf-8 -*-
from flask import request
import logging
from addons.dropbox.serializer import DropboxSerializer
from addons.base import generic_views
from website.project.decorators import must_have_addon, must_be_addon_authorizer
logger = logging.getLogger(__name__)
debug = logger.debug
# Addon identifiers consumed by the generic view factories below.
SHORT_NAME = 'dropbox'
FULL_NAME = 'Dropbox'
# View: list the user's linked Dropbox external accounts.
dropbox_account_list = generic_views.account_list(
    SHORT_NAME,
    DropboxSerializer
)
# View: import an existing Dropbox credential onto a node.
dropbox_import_auth = generic_views.import_auth(
    SHORT_NAME,
    DropboxSerializer
)
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
def dropbox_folder_list(node_addon, **kwargs):
    """ Returns all the subsequent folders under the folder id passed.

    ``folder_id`` is read from the request query string; when absent it is
    ``None`` and the root listing is presumably returned — confirm against
    ``node_addon.get_folders``.
    """
    folder_id = request.args.get('folder_id')
    return node_addon.get_folders(folder_id=folder_id)
# View: serialized Dropbox settings for a node.
dropbox_get_config = generic_views.get_config(
    SHORT_NAME,
    DropboxSerializer
)
def _set_folder(node_addon, folder, auth):
uid = fol | der['id']
node_addon.set_folder(uid, auth=auth)
node_addon.save()
# View: persist the user's folder choice (delegates to _set_folder).
dropbox_set_config = generic_views.set_config(
    SHORT_NAME,
    FULL_NAME,
    DropboxSerializer,
    _set_folder
)
# View: remove the node's Dropbox authorization.
dropbox_deauthorize_node = generic_views.deauthorize_node(
    SHORT_NAME
)
|
mtlchun/edx | common/test/acceptance/tests/studio/test_studio_settings_details.py | Python | agpl-3.0 | 6,290 | 0.001908 | """
Acceptance tests for Studio's Settings Details pages
"""
from unittest import skip
from acceptance.tests.studio.base_studio_test import StudioCourseTest
from ...fixtures.course import CourseFixture
from ...pages.studio.settings import SettingsPage
from ...pages.studio.overview import CourseOutlinePage
from ...tests.studio.base_studio_test import StudioCourseTest
from ..helpers import (
generate_course_key,
select_option_by_value,
is_option_value_selected,
element_has_text,
)
class SettingsMilestonesTest(StudioCourseTest):
    """
    Tests for milestones feature in Studio's settings tab
    """
    def setUp(self, is_staff=True):
        super(SettingsMilestonesTest, self).setUp(is_staff=is_staff)
        self.settings_detail = SettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # Before every test, make sure to visit the page first
        self.settings_detail.visit()
        self.assertTrue(self.settings_detail.is_browser_on_page())
    def test_page_has_prerequisite_field(self):
        """
        Test to make sure page has pre-requisite course field if milestones app is enabled.
        """
        self.assertTrue(self.settings_detail.pre_requisite_course_options)
    def test_prerequisite_course_save_successfully(self):
        """
        Scenario: Selecting course from Pre-Requisite course drop down save the selected course as pre-requisite
        course.
        Given that I am on the Schedule & Details page on studio
        When I select an item in pre-requisite course drop down and click Save Changes button
        Then My selected item should be saved as pre-requisite course
        And My selected item should be selected after refreshing the page.'
        """
        # Install a second course to serve as the prerequisite candidate.
        course_number = self.unique_id
        CourseFixture(
            org='test_org',
            number=course_number,
            run='test_run',
            display_name='Test Course' + course_number
        ).install()
        pre_requisite_course_key = generate_course_key(
            org='test_org',
            number=course_number,
            run='test_run'
        )
        pre_requisite_course_id = unicode(pre_requisite_course_key)
        # Refresh the page to load the new course fixture and populate the prerequisite course dropdown
        # Then select the prerequisite course and save the changes
        self.settings_detail.refresh_page()
        select_option_by_value(
            browser_query=self.settings_detail.pre_requisite_course_options,
            value=pre_requisite_course_id
        )
        self.settings_detail.save_changes()
        self.assertEqual(
            'Your changes have been saved.',
            self.settings_detail.alert_confirmation_title.text
        )
        # Refresh the page again to confirm the prerequisite course selection is properly reflected
        self.settings_detail.refresh_page()
        self.assertTrue(is_option_value_selected(
            browser_query=self.settings_detail.pre_requisite_course_options,
            value=pre_requisite_course_id
        ))
        # Set the prerequisite course back to None and save the changes
        select_option_by_value(
            browser_query=self.settings_detail.pre_requisite_course_options,
            value=''
        )
        self.settings_detail.save_changes()
        self.assertEqual(
            'Your changes have been saved.',
            self.settings_detail.alert_confirmation_title.text
        )
        # Refresh the page again to confirm the None selection is properly reflected
        self.settings_detail.refresh_page()
        self.assertTrue(is_option_value_selected(
            browser_query=self.settings_detail.pre_requisite_course_options,
            value=''
        ))
        # Re-pick the prerequisite course and confirm no errors are thrown (covers a discovered bug)
        select_option_by_value(
            browser_query=self.settings_detail.pre_requisite_course_options,
            value=pre_requisite_course_id
        )
        self.settings_detail.save_changes()
        self.assertEqual(
            'Your changes have been saved.',
            self.settings_detail.alert_confirmation_title.text
        )
        # Refresh the page again to confirm the prerequisite course selection is properly reflected
        self.settings_detail.refresh_page()
        dropdown_status = is_option_value_selected(
            browser_query=self.settings_detail.pre_requisite_course_options,
            value=pre_requisite_course_id
        )
        self.assertTrue(dropdown_status)
    def test_page_has_enable_entrance_exam_field(self):
        """
        Test to make sure page has 'enable entrance exam' field.
        """
        self.assertTrue(self.settings_detail.entrance_exam_field)
    @skip('Passes in devstack, passes individually in Jenkins, fails in suite in Jenkins.')
    def test_enable_entrance_exam_for_course(self):
        """
        Test that entrance exam should be created after checking the 'enable entrance exam' checkbox.
        And also that the entrance exam is destroyed after deselecting the checkbox.
        """
        self.settings_detail.require_entrance_exam(required=True)
        self.settings_detail.save_changes()
        # getting the course outline page.
        course_outline_page = CourseOutlinePage(
            self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
        )
        course_outline_page.visit()
        # title with text 'Entrance Exam' should be present on page.
        self.assertTrue(element_has_text(
            page=course_outline_page,
            css_selector='span.section-title',
            text='Entrance Exam'
        ))
        # Delete the currently created entrance exam.
        self.settings_detail.visit()
        self.settings_detail.require_entrance_exam(required=False)
        self.settings_detail.save_changes()
        course_outline_page.visit()
        self.assertFalse(element_has_text(
            page=course_outline_page,
            css_selector='span.section-title',
            text='Entrance Exam'
        ))
|
osm-pl/osm-translation-scrapper | sites/weblate.py | Python | unlicense | 648 | 0 | from bs4 import BeautifulSoup
import requests as r
def run(projects, language):
    """Scrape hosted.weblate.org for the translation completion percentage
    of each project in *projects* for the given *language* code.

    Returns a list of [project, percent] pairs, with 0.0 when no
    percentage element could be found on the page.
    """
    template = "https://hosted.weblate.org/projects/{0}/{1}/#overview"
    stats = []
    for name in projects:
        page = r.get(template.format(name, language)).text
        soup = BeautifulSoup(page, 'html.parser')
        percent_node = soup.find(attrs={'class': "percent"})
        if percent_node is None:
            completion = 0.0
        else:
            # Normalize "12,3%" style text into a parseable float.
            completion = float(
                percent_node.get_text().replace(",", ".").replace("%", ""))
        stats.append([name, completion])
    return stats
|
eliseuegewarth/sibaho | empregado/migrations/0008_auto_20170209_1606.py | Python | lgpl-3.0 | 616 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-09 18:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Must be applied after the previous empregado migration so the field
    # alterations happen in sequence.
    dependencies = [
        ('empregado', '0007_auto_20170208_2116'),
    ]

    # Redefine the email field on both models as EmailField(max_length=100).
    operations = [
        migrations.AlterField(
            model_name='estagiario',
            name='email',
            field=models.EmailField(max_length=100),
        ),
        migrations.AlterField(
            model_name='supervisor',
            name='email',
            field=models.EmailField(max_length=100),
        ),
    ]
|
kyvinh/home-assistant | homeassistant/components/emulated_hue/hue_api.py | Python | apache-2.0 | 13,038 | 0 | """Provides a Hue API to control Home Assistant."""
import asyncio
import logging
from aiohttp import web
from homeassistant import core
from homeassistant.const import (
ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON, SERVICE_VOLUME_SET,
SERVICE_OPEN_COVER, SERVICE_CLOSE_COVER, STATE_ON, STATE_OFF,
HTTP_BAD_REQUEST, HTTP_NOT_FOUND, ATTR_SUPPORTED_FEATURES,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS
)
from homeassistant.components.media_player import (
ATTR_MEDIA_VOLUME_LEVEL, SUPPORT_VOLUME_SET,
)
from homeassistant.components.fan import (
ATTR_SPEED, SUPPORT_SET_SPEED, SPEED_OFF, SPEED_LOW,
SPEED_MEDIUM, SPEED_HIGH
)
from homeassistant.components.http import HomeAssistantView
_LOGGER = logging.getLogger(__name__)

# Entity attribute keys specific to the emulated_hue component
# (presumably used for per-entity exposure overrides — confirm in config).
ATTR_EMULATED_HUE = 'emulated_hue'
ATTR_EMULATED_HUE_NAME = 'emulated_hue_name'

# Field names used in Hue API light-state JSON payloads.
HUE_API_STATE_ON = 'on'
HUE_API_STATE_BRI = 'bri'
class HueUsernameView(HomeAssistantView):
    """Handle username-creation requests from Hue API clients.

    The emulated bridge does not track users, so every valid request is
    answered with the same fixed username.
    """

    url = '/api'
    name = 'emulated_hue:api:create_username'
    extra_urls = ['/api/']
    requires_auth = False

    @asyncio.coroutine
    def post(self, request):
        """Handle a POST request."""
        try:
            body = yield from request.json()
        except ValueError:
            return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)

        if 'devicetype' in body:
            # Static username: the bridge accepts any client.
            return self.json([{'success': {'username': '12345678901234567890'}}])

        return self.json_message('devicetype not specified',
                                 HTTP_BAD_REQUEST)
class HueAllLightsStateView(HomeAssistantView):
    """Serve the full list of exposed entities as Hue lights."""

    url = '/api/{username}/lights'
    name = 'emulated_hue:lights:state'
    requires_auth = False

    def __init__(self, config):
        """Keep the emulated_hue configuration for entity lookups."""
        self.config = config

    @core.callback
    def get(self, request, username):
        """Process a request to get the list of available lights."""
        hass = request.app['hass']
        lights = {}

        for entity in hass.states.async_all():
            if not self.config.is_entity_exposed(entity):
                continue
            hue_state, level = get_entity_state(self.config, entity)
            hue_number = self.config.entity_id_to_number(entity.entity_id)
            lights[hue_number] = entity_to_json(entity, hue_state, level)

        return self.json(lights)
class HueOneLightStateView(HomeAssistantView):
    """Serve the state of a single exposed entity as a Hue light."""

    url = '/api/{username}/lights/{entity_id}'
    name = 'emulated_hue:light:state'
    requires_auth = False

    def __init__(self, config):
        """Keep the emulated_hue configuration for entity lookups."""
        self.config = config

    @core.callback
    def get(self, request, username, entity_id):
        """Process a request to get the state of an individual light."""
        hass = request.app['hass']
        # Map the Hue-side light number back to a Home Assistant entity id.
        ha_entity_id = self.config.number_to_entity_id(entity_id)
        entity = hass.states.get(ha_entity_id)

        if entity is None:
            _LOGGER.error('Entity not found: %s', ha_entity_id)
            return web.Response(text="Entity not found", status=404)

        if not self.config.is_entity_exposed(entity):
            _LOGGER.error('Entity not exposed: %s', ha_entity_id)
            return web.Response(text="Entity not exposed", status=404)

        hue_state, level = get_entity_state(self.config, entity)
        return self.json(entity_to_json(entity, hue_state, level))
class HueOneLightChangeView(HomeAssistantView):
"""Handle requests for getting and setting info about entities."""
url = '/api/{username}/lights/{entity_number}/state'
name = 'emulated_hue:light:state'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@asyncio.coroutin | e
def put(self, request, username, entity_number):
"""Process a request to set the state of an individual light."""
config = self.config
hass = request.app['hass']
entity_id = config.number_to_entity_id(entity_number)
if entity_id is None:
_LOGGER.error('Unknown entity number: %s', entity_number)
return self.json_message('Entity not found', HTTP_NOT_FOUND)
entity = hass.states.get(entity_id)
if entity is None:
_LOGGER.error('Entity not found: %s', entity_id)
return self.json_message('Entity not found', HTTP_NOT_FOUND)
if not config.is_entity_exposed(entity):
_LOGGER.error('Entity not exposed: %s', entity_id)
return web.Response(text="Entity not exposed", status=404)
try:
request_json = yield from request.json()
except ValueError:
_LOGGER.error('Received invalid json')
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
# Parse the request into requested "on" status and brightness
parsed = parse_hue_api_put_light_body(request_json, entity)
if parsed is None:
_LOGGER.error('Unable to parse data: %s', request_json)
return web.Response(text="Bad request", status=400)
result, brightness = parsed
# Choose general HA domain
domain = core.DOMAIN
# Entity needs separate call to turn on
turn_on_needed = False
# Convert the resulting "on" status into the service we need to call
service = SERVICE_TURN_ON if result else SERVICE_TURN_OFF
# Construct what we need to send to the service
data = {ATTR_ENTITY_ID: entity_id}
# Make sure the entity actually supports brightness
entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if entity.domain == "light":
if entity_features & SUPPORT_BRIGHTNESS:
if brightness is not None:
data[ATTR_BRIGHTNESS] = brightness
# If the requested entity is a script add some variables
elif entity.domain == "script":
data['variables'] = {
'requested_state': STATE_ON if result else STATE_OFF
}
if brightness is not None:
data['variables']['requested_level'] = brightness
# If the requested entity is a media player, convert to volume
elif entity.domain == "media_player":
if entity_features & SUPPORT_VOLUME_SET:
if brightness is not None:
turn_on_needed = True
domain = entity.domain
service = SERVICE_VOLUME_SET
# Convert 0-100 to 0.0-1.0
data[ATTR_MEDIA_VOLUME_LEVEL] = brightness / 100.0
# If the requested entity is a cover, convert to open_cover/close_cover
elif entity.domain == "cover":
domain = entity.domain
if service == SERVICE_TURN_ON:
service = SERVICE_OPEN_COVER
else:
service = SERVICE_CLOSE_COVER
# If the requested entity is a fan, convert to speed
elif entity.domain == "fan":
if entity_features & SUPPORT_SET_SPEED:
if brightness is not None:
domain = entity.domain
# Convert 0-100 to a fan speed
if brightness == 0:
data[ATTR_SPEED] = SPEED_OFF
elif brightness <= 33.3 and brightness > 0:
data[ATTR_SPEED] = SPEED_LOW
elif brightness <= 66.6 and brightness > 33.3:
data[ATTR_SPEED] = SPEED_MEDIUM
elif brightness <= 100 and brightness > 66.6:
data[ATTR_SPEED] = SPEED_HIGH
if entity.domain in config.off_maps_to_on_domains:
# Map the off command to on
service = SERVICE_TURN_ON
|
sjdv1982/seamless | docs/archive/documentation-OLD/sphinx-source/conf.py | Python | mit | 5,272 | 0.000948 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# seamless documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 3 17:16:13 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'seamless'
copyright = '2016-2017, Sjoerd de Vries'
author = 'Sjoerd de Vries'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'seamlessdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. Lis | t of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'seamless.tex', 'seamless documentation',
'Sjoerd de Vries', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, | description, authors, manual section).
man_pages = [
(master_doc, 'seamless', 'seamless documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'seamless', 'seamless documentation',
author, 'seamless', 'One line description of project.',
'Miscellaneous'),
]
|
xiaoguoai/ec-dev-swift | swift/common/utils.py | Python | apache-2.0 | 85,284 | 0.000176 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance wit | h the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# | Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous utility functions for use with Swift."""
import errno
import fcntl
import grp
import hmac
import operator
import os
import pwd
import re
import sys
import threading as stdlib_threading
import time
import uuid
import functools
from hashlib import md5, sha1
from random import random, shuffle
from urllib import quote as _quote
from contextlib import contextmanager, closing
import ctypes
import ctypes.util
from ConfigParser import ConfigParser, NoSectionError, NoOptionError, \
RawConfigParser
from optparse import OptionParser
from Queue import Queue, Empty
from tempfile import mkstemp, NamedTemporaryFile
try:
import simplejson as json
except ImportError:
import json
import cPickle as pickle
import glob
from urlparse import urlparse as stdlib_urlparse, ParseResult
import itertools
import stat
import eventlet
import eventlet.semaphore
from eventlet import GreenPool, sleep, Timeout, tpool, greenthread, \
greenio, event
from eventlet.green import socket, threading
import eventlet.queue
import netifaces
import codecs
utf8_decoder = codecs.getdecoder('utf-8')
utf8_encoder = codecs.getencoder('utf-8')
from swift import gettext_ as _
from swift.common.exceptions import LockTimeout, MessageTimeout
from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND
# logging doesn't import patched as cleanly as one would like
from logging.handlers import SysLogHandler
import logging
# Route the logging module's threading primitives through eventlet's green
# versions so log locking cooperates with greenthreads.
logging.thread = eventlet.green.thread
logging.threading = eventlet.green.threading
logging._lock = logging.threading.RLock()

# setup notice level logging
NOTICE = 25
logging._levelNames[NOTICE] = 'NOTICE'
SysLogHandler.priority_map['NOTICE'] = 'notice'

# These are lazily pulled from libc elsewhere
_sys_fallocate = None
_posix_fadvise = None

# If set to non-zero, fallocate routines will fail based on free space
# available being at or below this amount, in bytes.
FALLOCATE_RESERVE = 0

# Used by hash_path to offer a bit more security when generating hashes for
# paths. It simply appends this value to all paths; guessing the hash a path
# will end up with would also require knowing this suffix.
HASH_PATH_SUFFIX = ''
HASH_PATH_PREFIX = ''

# Cluster-wide configuration file read by validate_hash_conf() below.
SWIFT_CONF_FILE = '/etc/swift/swift.conf'
class InvalidHashPathConfigError(ValueError):
    """Raised when swift.conf provides neither swift_hash_path_suffix
    nor swift_hash_path_prefix."""

    def __str__(self):
        return ("[swift-hash]: both swift_hash_path_suffix and "
                "swift_hash_path_prefix are missing from %s" % SWIFT_CONF_FILE)
def validate_hash_conf():
    """Populate the HASH_PATH_SUFFIX/HASH_PATH_PREFIX module globals from
    the [swift-hash] section of SWIFT_CONF_FILE, if they are not set yet.

    :raises InvalidHashPathConfigError: if neither value could be obtained.
    """
    global HASH_PATH_SUFFIX
    global HASH_PATH_PREFIX
    if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX:
        hash_conf = ConfigParser()
        # ConfigParser.read() returns the list of files successfully parsed;
        # an empty list (missing/unreadable conf) skips straight to the check.
        if hash_conf.read(SWIFT_CONF_FILE):
            try:
                HASH_PATH_SUFFIX = hash_conf.get('swift-hash',
                                                 'swift_hash_path_suffix')
            except (NoSectionError, NoOptionError):
                pass  # the prefix alone may still be present below
            try:
                HASH_PATH_PREFIX = hash_conf.get('swift-hash',
                                                 'swift_hash_path_prefix')
            except (NoSectionError, NoOptionError):
                pass
        if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX:
            raise InvalidHashPathConfigError()

try:
    validate_hash_conf()
except InvalidHashPathConfigError:
    # could get monkey patched or lazy loaded
    pass
def get_hmac(request_method, path, expires, key):
    """
    Returns the hexdigest string of the HMAC-SHA1 (RFC 2104) for
    the request.

    :param request_method: Request method to allow.
    :param path: The path to the resource to allow access to.
    :param expires: Unix timestamp as an int for when the URL
                    expires.
    :param key: HMAC shared secret.

    :returns: hexdigest str of the HMAC-SHA1 for the request.
    """
    # The signed message is "<method>\n<expires>\n<path>"; a presented
    # signature only validates when all three values match exactly.
    # NOTE: under Python 2 the %-formatted message is a byte string, as
    # hmac.new requires.
    return hmac.new(
        key, '%s\n%s\n%s' % (request_method, expires, path), sha1).hexdigest()
# Used by get_swift_info and register_swift_info to store information about
# the swift cluster.
_swift_info = {}
_swift_admin_info = {}


def get_swift_info(admin=False, disallowed_sections=None):
    """
    Returns information about the swift cluster that has been previously
    registered with the register_swift_info call.

    :param admin: boolean value, if True will additionally return an 'admin'
                  section with information previously registered as admin
                  info.
    :param disallowed_sections: list of section names to be withheld from the
                                information returned.
    :returns: dictionary of information about the swift cluster.
    """
    disallowed_sections = disallowed_sections or []
    info = {}
    for section, section_info in _swift_info.items():
        if section in disallowed_sections:
            continue
        # Shallow-copy each section so callers cannot mutate the registry.
        info[section] = dict(section_info)
    if admin:
        info['admin'] = dict(_swift_admin_info)
        info['admin']['disallowed_sections'] = list(disallowed_sections)
    return info


def register_swift_info(name='swift', admin=False, **kwargs):
    """
    Registers information about the swift cluster to be retrieved with calls
    to get_swift_info.

    :param name: string, the section name to place the information under.
    :param admin: boolean, if True, information will be registered to an
                  admin section which can optionally be withheld when
                  requesting the information.
    :param kwargs: key value arguments representing the information to be
                   added.
    :raises ValueError: if name is one of the reserved section names.
    """
    if name in ('admin', 'disallowed_sections'):
        raise ValueError('\'{0}\' is reserved name.'.format(name))
    dict_to_use = _swift_admin_info if admin else _swift_info
    # items() behaves identically on Python 2 and, unlike the previous
    # Python-2-only iteritems(), also works under Python 3.
    section = dict_to_use.setdefault(name, {})
    for key, val in kwargs.items():
        section[key] = val
def backward(f, blocksize=4096):
    """
    A generator returning lines from a file starting with the last line,
    then the second last line, etc. i.e., it reads lines backwards.
    Stops when the first line (if any) is read.
    This is useful when searching for recent activity in very
    large files.

    :param f: file object to read
    :param blocksize: no of characters to go backwards at each block
    """
    f.seek(0, os.SEEK_END)
    if f.tell() == 0:
        return  # empty file: nothing to yield
    last_row = ''
    while f.tell() != 0:
        try:
            f.seek(-blocksize, os.SEEK_CUR)
        except IOError:
            # Fewer than blocksize characters remain before the start of
            # the file; shrink the step to exactly what is left.
            blocksize = f.tell()
            f.seek(-blocksize, os.SEEK_CUR)
        block = f.read(blocksize)
        # Rewind over what was just read so the next iteration steps
        # further back toward the start of the file.
        f.seek(-blocksize, os.SEEK_CUR)
        rows = block.split('\n')
        # The last fragment of this block may be an incomplete line;
        # join it with the carried-over head of the previous (later) block.
        rows[-1] = rows[-1] + last_row
        while rows:
            last_row = rows.pop(-1)
            if rows and last_row:
                yield last_row
    yield last_row
# Used when reading config values
TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))


def config_true_value(value):
    """
    Returns True if the value is either True or a string in TRUE_VALUES.
    Returns False otherwise.
    """
    if value is True:
        return True
    return isinstance(value, basestring) and value.lower() in TRUE_VALUES
def config_auto_int_value(value, default):
"""
Returns default if value is None or 'auto'.
Returns value as an int or raises ValueError otherwise.
"""
if value is None or \
(isinstance(value, basestring) and value.lower() == 'auto'):
return default
try:
value = int(value)
except (TypeError, ValueError):
raise ValueErro |
sapcc/monasca-notification | tests/test_hipchat_notification.py | Python | apache-2.0 | 3,335 | 0.0009 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import Queue
import unittest
from monasca_notification import notification as m_notification
from monasca_notification.plugins import hipchat_notifier
def alarm(metrics):
    """Build a canned alarm payload (OK -> ALARM, CRITICAL severity)
    carrying the supplied list of metric dicts, for use as notifier
    test input."""
    payload = dict(
        tenantId="0",
        alarmId="0",
        alarmDefinitionId=0,
        alarmName="test Alarm",
        alarmDescription="test Alarm description",
        oldState="OK",
        newState="ALARM",
        severity="CRITICAL",
        link="some-link",
        lifecycleState="OPEN",
        stateChangeReason="I am alarming!",
        timestamp=1429023453632,
    )
    payload["metrics"] = metrics
    return payload
class requestsResponse(object):
    """Minimal stand-in for a requests Response: only the status_code
    attribute is consulted by the notifier under test."""
    def __init__(self, status):
        self.status_code = status
class TestHipchat(unittest.TestCase):
    """Unit tests for the HipChat notifier plugin."""

    def setUp(self):
        # self.trap collects intercepted HTTP calls, log output and results;
        # tearDown asserts it was fully drained so nothing unexpected leaked.
        self.trap = Queue.Queue()
        self.hipchat_config = {'timeout': 50}

    def tearDown(self):
        self.assertTrue(self.trap.empty())

    def _http_post_200(self, url, data, **kwargs):
        # Fake requests.post: record the target URL and payload, answer 200.
        self.trap.put(url)
        self.trap.put(data)
        r = requestsResponse(200)
        return r

    @mock.patch('monasca_notification.plugins.hipchat_notifier.requests')
    def notify(self, http_func, mock_requests):
        # Drive one notification through the plugin with requests.post
        # replaced by http_func; log output and the send result are trapped.
        mock_log = mock.MagicMock()
        mock_log.warn = self.trap.put
        mock_log.error = self.trap.put
        mock_log.exception = self.trap.put
        mock_requests.post = http_func
        hipchat = hipchat_notifier.HipChatNotifier(mock_log)
        hipchat.config(self.hipchat_config)
        metric = []
        metric_data = {'dimensions': {'hostname': 'foo1', 'service': 'bar1'}}
        metric.append(metric_data)
        alarm_dict = alarm(metric)
        notification = m_notification.Notification(0, 'hipchat', 'hipchat notification',
                                                   'http://mock:3333/', 0, 0, alarm_dict)
        self.trap.put(hipchat.send_notification(notification))

    def test_hipchat_success(self):
        """hipchat success
        """
        self.notify(self._http_post_200)
        url = self.trap.get(timeout=1)
        data = self.trap.get(timeout=1)
        self.valid_hipchat_message(url, data)
        return_value = self.trap.get()
        self.assertTrue(return_value)

    def valid_hipchat_message(self, url, data):
        # Shared assertions on the intercepted POST target and payload.
        self.assertEqual(url, "http://mock:3333/")
        self.assertEqual(data.get('color'), 'red')
        self.assertEqual(data.get('message_format'), 'text')
        message = json.loads(data.get('message'))
        self.assertEqual(message.get('message'), 'I am alarming!')
        self.assertEqual(message.get('alarm_name'), 'test Alarm')
        self.assertEqual(message.get('alarm_description'), 'test Alarm description')
|
dlab-projects/python-taq | marketflow/dsf_with_sic.py | Python | bsd-2-clause | 887 | 0.036077 | class DSF_SIC_Map(object):
"""docstring for SIC_Map"""
def __init__(self, dsffile = 'crsp/dsf.csv', sicfile = 'sic_codes.txt'):
self.dsf = pd.read_csv("dsf.csv", dtype = {'CUSIP': np.str, 'PRC': np.float}, na_values = {'PRC': '-'})
self.sic = pd.read_table(sicfile, header = 1)
self.sic.columns = ['HSICCD', 'SICNAME']
def pr | ocess(self, day = 20100101, columns = ['PERMNO', 'DATE', 'PRC', 'VOL', 'SHROUT', 'RET', 'HSICCD']):
self.dsf_startdate(date = day)
self.dsf_subset(to_keep = columns)
self.sic_merge()
def dsf_startdate(self, date = 20100101):
self.dsf = self.dsf[self.dsf.DATE >= date]
def dsf_subset(self, to_keep = ['PERMNO', 'DATE', 'PRC', 'VOL', 'SHROUT', 'RET', 'HSICCD']):
| self.dsf = self.dsf[to_keep]
def sic_merge(self):
self.clean_dsf = self.dsf.merge(self.sic, how = "left") |
strange/django-simple-comments | simple_comments/comments.py | Python | bsd-3-clause | 15,043 | 0.002792 | import datetime
from django import http
from django.conf import settings
from django.forms.models import modelform_factory
from django.shortcuts import get_object_or_404
from django.views.generic.list_detail import object_list
from django.views.generic.simple import direct_to_template
from django.core.urlresolvers import reverse
from simple_comments import forms as comment_forms
NOTIFICATION_LABEL = 'simple_comments_comment'
class CommentConfiguration(object):
"""A set of basic configuration options for handling comments. Subclass
this class to create your own custom behaviour.
There are three builtin levels of spam prevention: ``use_akismet``,
``use_control_question`` and ``use_honeypot`` are all boolean attributes
that allows enabling of spam prevention. Override
``get_spam_prevention_forms`` in a subclass to add custom spam prevention
mechanisms.
``user_comments`` determines whether a user must be registered in order to
post comments. It will also remove the need for a poster to fill in
otherwise mandatory fields such as name and email.
``user_can_delete`` determines if users should be able to delete their own
comments or not.
``autoclose_after`` determines how many days should pass after the target
was created before comments should be closed. ``autoclose_field_name`` is
the name of a date/datetime field on the target model that specifies when
the target was created.
``prevent_duplicates`` dictates whether some measures should be taken
against duplicate comments.
``allow_comments_field_name`` is a boolean field on the target model that,
when evaluating to ``False``, prevents comments from being posted.
``send_notifications`` dictates whether notifications should be sent or
not.
"""
template_object_name = 'comment'
preview_template_name = 'simple_comments/comment_preview.html'
form_template_name = 'simple_comments/comment_form.html'
list_template_name = 'simple_comments/comment_list.html'
deleted_template_namae = 'simple_comments/comment_deleted.html'
posted_template_name = 'simple_comments/comment_posted.html'
use_akismet = False
use_control_question = False
use_honeypot = False
user_comments = False
user_can_delete = False
autoclose_after = None
autoclose_after_field_name = None
prevent_duplicates = True
allow_comments_field_name = None
send_notifications = False
# require_moderation = False
# confirm_delete = True
# comment_markup
order_by = 'pub_date'
paginate_by = 25
def __init__(self, configuration_key, model):
self.configuration_key = configuration_key
self.model = model
def get_exclude(self):
"""Return a list of fields to exclude when generating a form using
``get_form()``. Defaults to the basic fields of the ``BaseComment`` we
want to exclude.
Subclasses may override this method to alter the list of fields to
exlcude, albeit it's probably easier to just set an ``exclude``
attribute.
"""
exclude = ['user', 'user_username', 'pub_date', 'ip_address', 'target']
if self.user_comments:
exclude = exclude + ['author_name', 'author_email',
'author | _website']
return exclude
exclude = property(fget=lambda self: self.get_exclude())
def days_since_target_was_published(self, target):
"""Return the number of days that have passed since ``target`` was
published.
"""
now = datetime.datetime.now()
published = getattr(target, self.autoclose_after_field_name)
diff = datetime.dat | e(now.year, now.month, now.day) - \
datetime.date(published.year, published.month, published.day)
return diff.days
def allow_comments(self, target):
"""Return a boolean dictating whether comments are allowed for
``target`` or not.
"""
if self.allow_comments_field_name is not None and \
not getattr(target, self.allow_comments_field_name):
return False
if self.autoclose_after_field_name is not None and \
self.autoclose_after is not None:
days_since = self.days_since_target_was_published(target)
return days_since < self.autoclose_after
return True
def get_target_owner(self, target):
"""Return the owner (``User`` instance) of target."""
return None
def get_duplicate(self, target, comment):
"""Try to determine if a duplicate of `comment` exists. If entries
posted by the same author, with the same content, exist for the same
day the latest "duplicate" record is returned. Otherwise return
``None``.
This method should be overridden if a custom model (that requires
custom checks) is used.
"""
filter_kwargs = {
'user': comment.user,
'author_name': comment.author_name,
'author_email': comment.author_email,
'author_website': comment.author_website,
'target': target,
}
queryset = comment._default_manager.filter(**filter_kwargs)
queryset = queryset.order_by('-pub_date')
if queryset.count():
latest = queryset[0]
if latest.pub_date.date() == comment.pub_date.date() and \
latest.body == comment.body:
return latest
return None
def get_post_save_redirect_url(self, target, comment):
"""Return a URL to redirect to after a successful comment save."""
return reverse('simple_comments_comment_posted',
args=[self.configuration_key, target.pk, comment.pk])
def get_post_delete_redirect_url(self, target):
"""Return a URL to redirect to after a successful comment delete."""
return reverse('simple_comments_comment_deleted',
args=[self.configuration_key, target.pk])
def get_form(self):
"""Return a form-class to use when creating comments.
Subclasses can override this method to return a custom form.
"""
return modelform_factory(self.model, fields=None, exclude=self.exclude)
def get_spam_prevention_forms(self):
"""Return a list containing spam prevention forms."""
forms = []
if self.use_akismet:
forms.append(comment_forms.AkismetForm)
if self.use_control_question:
forms.append(comment_forms.EarTriviaForm)
if self.use_honeypot:
forms.append(comment_forms.HoneypotForm)
return forms
def has_permission_to_delete(self, comment, user, request=None):
"""Return a boolean dictating whether a user has permission to delete
a comment or not.
"""
if user is None or user.is_anonymous():
return False
target_owner = self.get_target_owner(comment.target)
if self.user_can_delete and \
(comment.user == user or target_owner == user):
return True
if request is not None:
opts = comment.target._meta
perm = '%s.%s' % (opts.app_label, opts.get_delete_permission())
if request.user.has_perm(perm):
return True
return False
def get_notification_users(self, target):
"""Return an iterable of ``User`` instances that should be notified
when a comment is made on ``target``.
"""
return [self.get_target_owner(target)]
def dispatch_notifications(self, comment):
users = self.get_notification_users(comment.target)
if not self.send_notifications or \
"notification" not in settings.INSTALLED_APPS or not users:
return False
from notification import models as notification
context = {
'comment': comment,
'verbose_name': comment.targe |
jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_rich_string01.py | Python | bsd-2-clause | 1,233 | 0 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        filename = 'rich_string01.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = '{0}_test_{1}'.format(test_dir, filename)
        self.exp_filename = '{0}xlsx_files/{1}'.format(test_dir, filename)

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        bold_format = workbook.add_format({'bold': 1})
        italic_format = workbook.add_format({'italic': 1})

        worksheet.write('A1', 'Foo', bold_format)
        worksheet.write('A2', 'Bar', italic_format)
        worksheet.write_rich_string('A3', 'a', bold_format, 'bc', 'defg')

        workbook.close()

        self.assertExcelEqual()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.