Dataset schema (column name, type, and observed range as reported by the dataset viewer):

| column | type | observed values |
|---|---|---|
| repo_name | string | lengths 5 to 92 |
| path | string | lengths 4 to 221 |
| copies | string | 19 classes |
| size | string | lengths 4 to 6 |
| content | string | lengths 766 to 896k |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51 to 99.9 |
| line_max | int64 | 32 to 997 |
| alpha_frac | float64 | 0.25 to 0.96 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.5 to 13.6 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |

Each data row below gives these fields in the same order, separated by | markers: repo_name, path, copies, size, the full file content inline, then license, hash, line_mean, line_max, alpha_frac, autogenerated, ratio, config_test, has_no_keywords, and few_assignments.
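As a quick illustration of how rows with this schema might be consumed, here is a minimal Python 3 sketch that reads rows from a local JSON Lines export and filters them by license and size. The file name `rows.jsonl`, the license list, and the size threshold are hypothetical, and the idea that `line_mean` and `line_max` are simply the mean and maximum line lengths of `content` is an assumption, not something the dataset documents.

```python
# Minimal sketch, assuming the rows are available locally as JSON Lines with
# the columns listed above. The file name "rows.jsonl" is hypothetical.
import json


def iter_rows(path, licenses=("gpl-3.0", "apache-2.0"), max_size=20000):
    """Yield rows whose license is in `licenses` and whose size is at most `max_size`."""
    with open(path, "r", encoding="utf-8") as handle:
        for line in handle:
            row = json.loads(line)
            # "size" is typed as a string in the schema above, so cast before comparing.
            if row["license"] in licenses and int(row["size"]) <= max_size:
                yield row


def line_stats(content):
    """Recompute per-line statistics, assuming line_mean/line_max are the mean
    and maximum line lengths of the content field (an assumption, not documented)."""
    lengths = [len(line) for line in content.splitlines()] or [0]
    return sum(lengths) / len(lengths), max(lengths)


if __name__ == "__main__":
    for row in iter_rows("rows.jsonl"):
        mean_len, max_len = line_stats(row["content"])
        print(row["repo_name"], row["path"], round(mean_len, 2), max_len)
```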
| ajaygarg84/sugar | src/jarabe/frame/clipboardpanelwindow.py | 1 | 5219 |
# Copyright (C) 2007, One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
from urlparse import urlparse
import hashlib
from gi.repository import Gtk
from gi.repository import Gdk
from jarabe.frame.framewindow import FrameWindow
from jarabe.frame.clipboardtray import ClipboardTray
from jarabe.frame import clipboard
class ClipboardPanelWindow(FrameWindow):
def __init__(self, frame, orientation):
FrameWindow.__init__(self, orientation)
self._frame = frame
# Listening for new clipboard objects
# NOTE: we need to keep a reference to Gtk.Clipboard in order to keep
# listening to it.
self._clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
self._clipboard.connect('owner-change', self._owner_change_cb)
self._clipboard_tray = ClipboardTray()
self._clipboard_tray.show()
self.append(self._clipboard_tray)
# Receiving dnd drops
self.drag_dest_set(0, [], 0)
self.connect('drag_motion', self._clipboard_tray.drag_motion_cb)
self.connect('drag_leave', self._clipboard_tray.drag_leave_cb)
self.connect('drag_drop', self._clipboard_tray.drag_drop_cb)
self.connect('drag_data_received',
self._clipboard_tray.drag_data_received_cb)
def _owner_change_cb(self, x_clipboard, event):
logging.debug('owner_change_cb')
if self._clipboard_tray.owns_clipboard():
return
cb_service = clipboard.get_instance()
result, targets = x_clipboard.wait_for_targets()
cb_selections = []
if not result:
return
target_is_uri = False
for target in targets:
if target not in ('TIMESTAMP', 'TARGETS',
'MULTIPLE', 'SAVE_TARGETS'):
logging.debug('Asking for target %s.', target)
if target == 'text/uri-list':
target_is_uri = True
selection = x_clipboard.wait_for_contents(target)
if not selection:
logging.warning('no data for selection target %s.', target)
continue
cb_selections.append(selection)
if target_is_uri:
uri = selection.get_uris()[0]
filename = uri[len('file://'):].strip()
md5 = self._md5_for_file(filename)
data_hash = hash(md5)
else:
data_hash = hash(selection.get_data())
if len(cb_selections) > 0:
key = cb_service.add_object(name="", data_hash=data_hash)
if key is None:
return
cb_service.set_object_percent(key, percent=0)
for selection in cb_selections:
self._add_selection(key, selection)
cb_service.set_object_percent(key, percent=100)
def _md5_for_file(self, file_name):
'''Calculate md5 for file data
Calculating block wise to prevent issues with big files in memory
'''
block_size = 8192
md5 = hashlib.md5()
f = open(file_name, 'r')
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
f.close()
return md5.digest()
def _add_selection(self, key, selection):
if not selection.get_data():
logging.warning('no data for selection target %s.',
selection.get_data_type())
return
selection_type = str(selection.get_data_type())
logging.debug('adding type ' + selection_type + '.')
cb_service = clipboard.get_instance()
if selection_type == 'text/uri-list':
uris = selection.get_uris()
if len(uris) > 1:
raise NotImplementedError('Multiple uris in text/uri-list' \
' still not supported.')
uri = uris[0]
scheme, netloc_, path_, parameters_, query_, fragment_ = \
urlparse(uri)
on_disk = (scheme == 'file')
cb_service.add_object_format(key,
selection_type,
uri,
on_disk)
else:
cb_service.add_object_format(key,
selection_type,
selection.get_data(),
on_disk=False)
| gpl-2.0 | 9,051,991,144,524,601,000 | 35.496503 | 79 | 0.569266 | false | 4.205479 | false | false | false |
| yCanta/yCanta | convert/higherpraise.py | 1 | 2030 |
import urllib2
import re
def convert(input_url):
'''return a dictionary as documented in __init__.py '''
# Example higherpraise.com content
# <div aligni="center"><!-- #BeginEditable "1" -->
# <table attrs=". . . ">
# ...
# <h1 align="center"><b><font class="default"><u>
# Title text </u></font><b></h1>
# <h4 align="center"><b><font class="default"><u>
# Author </u></font><b></h1>
# ...
# <pre><strong>Text, more text<br>More text<br><br>Next chunk
# </strong></pre>
# OR
# <pre><strong>Text, more text
# More text
#
# Next Chunk
# </strong></pre>
# ...
# </table>
# <!-- #EndEditable -->
content = urllib2.urlopen(input_url).read()
tag = re.compile(r'\<.*?\>')
try:
song_title = tag.sub('', re.split('\\<.*?h1.*?\\>', content)[1]).strip()
except:
song_title = ''
try:
song_author = tag.sub('', re.split('\\<.*?h4.*?\\>', content)[1]).strip()
except:
song_author = ''
# now the real work -- parsing content into a song
try:
song_div = content.split('<pre>')[1].split('</pre>')[0].replace(' ', ' ')
except:
song_div = content.split('<PRE>')[1].split('</PRE>')[0].replace(' ', ' ')
song_div = tag.sub('', song_div.replace('<br>','\n').replace('<BR>',''))
chunks = []
chunk_types = []
lines = []
# Split into multiple chunks
chunk_list = re.split('\n[ \t\r\f\v]*?\n(?=\s*?\S)', song_div)
for chunk in chunk_list:
if chunk.strip() in (song_title or song_author):
continue
chunks.append(chunk)
chunk_types.append('verse')
# Leave as one chunk
# chunks.append(song_div)
# chunk_types.append('verse')
return dict(title=song_title, author=song_author, chunks=chunks, chunk_type=chunk_types)
if __name__ == '__main__':
import sys
d = convert(sys.argv[1])
# write the song:
print d['title']
print '-'*len(d['title'])
print
print 'Author:', d['author']
print
for chunk in d['chunks']:
print chunk
print
| unlicense | 8,676,968,065,586,122,000 | 24.061728 | 90 | 0.550246 | false | 3.108729 | false | false | false |
| shlomozippel/ansible | lib/ansible/playbook/play.py | 1 | 14133 |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from ansible import utils
from ansible import errors
from ansible.playbook.task import Task
import shlex
import os
class Play(object):
__slots__ = [
'hosts', 'name', 'vars', 'vars_prompt', 'vars_files',
'handlers', 'remote_user', 'remote_port',
'sudo', 'sudo_user', 'transport', 'playbook',
'tags', 'gather_facts', 'serial', '_ds', '_handlers', '_tasks',
'basedir'
]
# to catch typos and so forth -- these are userland names
# and don't line up 1:1 with how they are stored
VALID_KEYS = [
'hosts', 'name', 'vars', 'vars_prompt', 'vars_files',
'tasks', 'handlers', 'user', 'port', 'include',
'sudo', 'sudo_user', 'connection', 'tags', 'gather_facts', 'serial'
]
# *************************************************
def __init__(self, playbook, ds, basedir):
''' constructor loads from a play datastructure '''
for x in ds.keys():
if not x in Play.VALID_KEYS:
raise errors.AnsibleError("%s is not a legal parameter in an Ansible Playbook" % x)
# TODO: more error handling
hosts = ds.get('hosts')
if hosts is None:
raise errors.AnsibleError('hosts declaration is required')
elif isinstance(hosts, list):
hosts = ';'.join(hosts)
self._ds = ds
self.playbook = playbook
self.basedir = basedir
self.vars = ds.get('vars', {})
self.vars_files = ds.get('vars_files', [])
self.vars_prompt = ds.get('vars_prompt', {})
self.vars = self._get_vars()
self.hosts = utils.template(basedir, hosts, self.vars)
self.name = ds.get('name', self.hosts)
self._tasks = ds.get('tasks', [])
self._handlers = ds.get('handlers', [])
self.remote_user = utils.template(basedir, ds.get('user', self.playbook.remote_user), self.vars)
self.remote_port = ds.get('port', self.playbook.remote_port)
self.sudo = ds.get('sudo', self.playbook.sudo)
self.sudo_user = utils.template(basedir, ds.get('sudo_user', self.playbook.sudo_user), self.vars)
self.transport = ds.get('connection', self.playbook.transport)
self.tags = ds.get('tags', None)
self.gather_facts = ds.get('gather_facts', None)
self.serial = int(utils.template_ds(basedir, ds.get('serial', 0), self.vars))
if isinstance(self.remote_port, basestring):
self.remote_port = utils.template(basedir, self.remote_port, self.vars)
self._update_vars_files_for_host(None)
self._tasks = self._load_tasks(self._ds.get('tasks', []))
self._handlers = self._load_tasks(self._ds.get('handlers', []))
if self.tags is None:
self.tags = []
elif type(self.tags) in [ str, unicode ]:
self.tags = [ self.tags ]
elif type(self.tags) != list:
self.tags = []
if self.sudo_user != 'root':
self.sudo = True
# *************************************************
def _load_tasks(self, tasks, vars={}, additional_conditions=[]):
''' handle task and handler include statements '''
results = []
for x in tasks:
task_vars = self.vars.copy()
task_vars.update(vars)
if 'include' in x:
tokens = shlex.split(x['include'])
items = ['']
included_additional_conditions = list(additional_conditions)
for k in x:
if k.startswith("with_"):
plugin_name = k[5:]
if plugin_name not in utils.plugins.lookup_loader:
raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
terms = utils.template_ds(self.basedir, x[k], task_vars)
items = utils.plugins.lookup_loader.get(plugin_name, basedir=self.basedir, runner=None).run(terms, inject=task_vars)
elif k.startswith("when_"):
included_additional_conditions.append(utils.compile_when_to_only_if("%s %s" % (k[5:], x[k])))
elif k in ("include", "vars", "only_if"):
pass
else:
raise errors.AnsibleError("parse error: task includes cannot be used with other directives: %s" % k)
if 'vars' in x:
task_vars.update(x['vars'])
if 'only_if' in x:
included_additional_conditions.append(x['only_if'])
for item in items:
mv = task_vars.copy()
mv['item'] = item
for t in tokens[1:]:
(k,v) = t.split("=", 1)
mv[k] = utils.template_ds(self.basedir, v, mv)
include_file = utils.template(self.basedir, tokens[0], mv)
data = utils.parse_yaml_from_file(utils.path_dwim(self.basedir, include_file))
results += self._load_tasks(data, mv, included_additional_conditions)
elif type(x) == dict:
results.append(Task(self,x,module_vars=task_vars, additional_conditions=additional_conditions))
else:
raise Exception("unexpected task type")
for x in results:
if self.tags is not None:
x.tags.extend(self.tags)
return results
# *************************************************
def tasks(self):
''' return task objects for this play '''
return self._tasks
def handlers(self):
''' return handler objects for this play '''
return self._handlers
# *************************************************
def _get_vars(self):
''' load the vars section from a play, accounting for all sorts of variable features
including loading from yaml files, prompting, and conditional includes of the first
file found in a list. '''
if self.vars is None:
self.vars = {}
if type(self.vars) not in [dict, list]:
raise errors.AnsibleError("'vars' section must contain only key/value pairs")
vars = {}
# translate a list of vars into a dict
if type(self.vars) == list:
for item in self.vars:
if getattr(item, 'items', None) is None:
raise errors.AnsibleError("expecting a key-value pair in 'vars' section")
k, v = item.items()[0]
vars[k] = v
else:
vars.update(self.vars)
if type(self.vars_prompt) == list:
for var in self.vars_prompt:
if not 'name' in var:
raise errors.AnsibleError("'vars_prompt' item is missing 'name:'")
vname = var['name']
prompt = var.get("prompt", vname)
default = var.get("default", None)
private = var.get("private", True)
confirm = var.get("confirm", False)
encrypt = var.get("encrypt", None)
salt_size = var.get("salt_size", None)
salt = var.get("salt", None)
if vname not in self.playbook.extra_vars:
vars[vname] = self.playbook.callbacks.on_vars_prompt (
vname, private, prompt, encrypt, confirm, salt_size, salt, default
)
elif type(self.vars_prompt) == dict:
for (vname, prompt) in self.vars_prompt.iteritems():
prompt_msg = "%s: " % prompt
if vname not in self.playbook.extra_vars:
vars[vname] = self.playbook.callbacks.on_vars_prompt(
varname=vname, private=False, prompt=prompt_msg, default=None
)
else:
raise errors.AnsibleError("'vars_prompt' section is malformed, see docs")
results = self.playbook.extra_vars.copy()
results.update(vars)
return results
# *************************************************
def update_vars_files(self, hosts):
''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in '''
# now loop through all the hosts...
for h in hosts:
self._update_vars_files_for_host(h)
# *************************************************
def compare_tags(self, tags):
''' given a list of tags that the user has specified, return two lists:
matched_tags: tags were found within the current play and match those given
by the user
unmatched_tags: tags that were found within the current play but do not match
any provided by the user '''
# gather all the tags in all the tasks into one list
all_tags = []
for task in self._tasks:
all_tags.extend(task.tags)
# compare the lists of tags using sets and return the matched and unmatched
all_tags_set = set(all_tags)
tags_set = set(tags)
matched_tags = all_tags_set & tags_set
unmatched_tags = all_tags_set - tags_set
return matched_tags, unmatched_tags
# *************************************************
def _has_vars_in(self, msg):
return ((msg.find("$") != -1) or (msg.find("{{") != -1))
# *************************************************
def _update_vars_files_for_host(self, host):
if type(self.vars_files) != list:
self.vars_files = [ self.vars_files ]
if host is not None:
inject = {}
inject.update(self.playbook.inventory.get_variables(host))
inject.update(self.playbook.SETUP_CACHE[host])
for filename in self.vars_files:
if type(filename) == list:
                # loop over all filenames, loading the first one, and failing if none found
found = False
sequence = []
for real_filename in filename:
filename2 = utils.template(self.basedir, real_filename, self.vars)
filename3 = filename2
if host is not None:
filename3 = utils.template(self.basedir, filename2, inject)
filename4 = utils.path_dwim(self.basedir, filename3)
sequence.append(filename4)
if os.path.exists(filename4):
found = True
data = utils.parse_yaml_from_file(filename4)
if type(data) != dict:
raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
if host is not None:
if self._has_vars_in(filename2) and not self._has_vars_in(filename3):
# this filename has variables in it that were fact specific
# so it needs to be loaded into the per host SETUP_CACHE
self.playbook.SETUP_CACHE[host].update(data)
self.playbook.callbacks.on_import_for_host(host, filename4)
elif not self._has_vars_in(filename4):
# found a non-host specific variable, load into vars and NOT
# the setup cache
self.vars.update(data)
elif host is not None:
self.playbook.callbacks.on_not_import_for_host(host, filename4)
if found:
break
if not found and host is not None:
raise errors.AnsibleError(
"%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
)
else:
# just one filename supplied, load it!
filename2 = utils.template(self.basedir, filename, self.vars)
filename3 = filename2
if host is not None:
filename3 = utils.template(self.basedir, filename2, inject)
filename4 = utils.path_dwim(self.basedir, filename3)
if self._has_vars_in(filename4):
continue
new_vars = utils.parse_yaml_from_file(filename4)
if new_vars:
if type(new_vars) != dict:
raise errors.AnsibleError("%s must be stored as dictonary/hash: %s" % filename4)
if host is not None and self._has_vars_in(filename2) and not self._has_vars_in(filename3):
# running a host specific pass and has host specific variables
# load into setup cache
self.playbook.SETUP_CACHE[host].update(new_vars)
elif host is None:
# running a non-host specific pass and we can update the global vars instead
self.vars.update(new_vars)
| gpl-3.0 | -539,593,946,497,060,400 | 42.352761 | 141 | 0.521333 | false | 4.411049 | false | false | false |
| maribhez/DietasBot | fabfile.py | 1 | 1342 |
# coding: utf-8
from fabric.api import *
import os
# Initial step to get our machine ready.
def Instala():
    # Make sure the machine starts from a clean state.
    run('sudo rm -rf DietasBot')
    # Download our application from GitHub.
    run('git clone https://github.com/maribhez/DietasBot.git')
    # Enter the newly created folder and install the requirements.
    run('cd DietasBot && pip install -r requirements.txt')
# Function to launch our application.
def Ejecutar():
with shell_env(HOST_BD=os.environ['HOST_BD'],
USER_BD=os.environ['USER_BD'],
PASS_BD=os.environ['PASS_BD'],
NAME_BD=os.environ['NAME_BD'],
TOKENBOT=os.environ['TOKENBOT']
):
run('sudo supervisorctl start botdietas')
def Recargar():
run("sudo supervisorctl reload")
def Detener():
    run('sudo supervisorctl stop botdietas')
def Borrado():
    run('sudo rm -rf DietasBot')
def Test():
with shell_env(HOST_BD=os.environ['HOST_BD'],
USER_BD=os.environ['USER_BD'],
PASS_BD=os.environ['PASS_BD'],
NAME_BD=os.environ['NAME_BD'],
TOKENBOT=os.environ['TOKENBOT']
):
run('cd DietasBot/botDietas && python test_bot.py')
| gpl-3.0 | 6,034,723,090,547,334,000 | 28.822222 | 69 | 0.590164 | false | 3.218225 | false | false | false |
| evilhero/mylar | lib/comictaggerlib/comicapi/comet.py | 1 | 9192 |
"""A class to encapsulate CoMet data"""
# Copyright 2012-2014 Anthony Beville
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.etree.ElementTree as ET
#from datetime import datetime
#from pprint import pprint
#import zipfile
from genericmetadata import GenericMetadata
import utils
class CoMet:
writer_synonyms = ['writer', 'plotter', 'scripter']
penciller_synonyms = ['artist', 'penciller', 'penciler', 'breakdowns']
inker_synonyms = ['inker', 'artist', 'finishes']
colorist_synonyms = ['colorist', 'colourist', 'colorer', 'colourer']
letterer_synonyms = ['letterer']
cover_synonyms = ['cover', 'covers', 'coverartist', 'cover artist']
editor_synonyms = ['editor']
def metadataFromString(self, string):
tree = ET.ElementTree(ET.fromstring(string))
return self.convertXMLToMetadata(tree)
def stringFromMetadata(self, metadata):
header = '<?xml version="1.0" encoding="UTF-8"?>\n'
tree = self.convertMetadataToXML(self, metadata)
return header + ET.tostring(tree.getroot())
def indent(self, elem, level=0):
# for making the XML output readable
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def convertMetadataToXML(self, filename, metadata):
# shorthand for the metadata
md = metadata
# build a tree structure
root = ET.Element("comet")
root.attrib['xmlns:comet'] = "http://www.denvog.com/comet/"
root.attrib['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
root.attrib[
'xsi:schemaLocation'] = "http://www.denvog.com http://www.denvog.com/comet/comet.xsd"
# helper func
def assign(comet_entry, md_entry):
if md_entry is not None:
ET.SubElement(root, comet_entry).text = u"{0}".format(md_entry)
        # title is mandatory
if md.title is None:
md.title = ""
assign('title', md.title)
assign('series', md.series)
assign('issue', md.issue) # must be int??
assign('volume', md.volume)
assign('description', md.comments)
assign('publisher', md.publisher)
assign('pages', md.pageCount)
assign('format', md.format)
assign('language', md.language)
assign('rating', md.maturityRating)
assign('price', md.price)
assign('isVersionOf', md.isVersionOf)
assign('rights', md.rights)
assign('identifier', md.identifier)
assign('lastMark', md.lastMark)
assign('genre', md.genre) # TODO repeatable
if md.characters is not None:
char_list = [c.strip() for c in md.characters.split(',')]
for c in char_list:
assign('character', c)
if md.manga is not None and md.manga == "YesAndRightToLeft":
assign('readingDirection', "rtl")
date_str = ""
if md.year is not None:
date_str = str(md.year).zfill(4)
if md.month is not None:
date_str += "-" + str(md.month).zfill(2)
assign('date', date_str)
assign('coverImage', md.coverImage)
# need to specially process the credits, since they are structured
# differently than CIX
credit_writer_list = list()
credit_penciller_list = list()
credit_inker_list = list()
credit_colorist_list = list()
credit_letterer_list = list()
credit_cover_list = list()
credit_editor_list = list()
# loop thru credits, and build a list for each role that CoMet supports
for credit in metadata.credits:
if credit['role'].lower() in set(self.writer_synonyms):
ET.SubElement(
root,
'writer').text = u"{0}".format(
credit['person'])
if credit['role'].lower() in set(self.penciller_synonyms):
ET.SubElement(
root,
'penciller').text = u"{0}".format(
credit['person'])
if credit['role'].lower() in set(self.inker_synonyms):
ET.SubElement(
root,
'inker').text = u"{0}".format(
credit['person'])
if credit['role'].lower() in set(self.colorist_synonyms):
ET.SubElement(
root,
'colorist').text = u"{0}".format(
credit['person'])
if credit['role'].lower() in set(self.letterer_synonyms):
ET.SubElement(
root,
'letterer').text = u"{0}".format(
credit['person'])
if credit['role'].lower() in set(self.cover_synonyms):
ET.SubElement(
root,
'coverDesigner').text = u"{0}".format(
credit['person'])
if credit['role'].lower() in set(self.editor_synonyms):
ET.SubElement(
root,
'editor').text = u"{0}".format(
credit['person'])
# self pretty-print
self.indent(root)
# wrap it in an ElementTree instance, and save as XML
tree = ET.ElementTree(root)
return tree
def convertXMLToMetadata(self, tree):
root = tree.getroot()
if root.tag != 'comet':
            raise Exception("not a CoMet XML document")
metadata = GenericMetadata()
md = metadata
# Helper function
def xlate(tag):
node = root.find(tag)
if node is not None:
return node.text
else:
return None
md.series = xlate('series')
md.title = xlate('title')
md.issue = xlate('issue')
md.volume = xlate('volume')
md.comments = xlate('description')
md.publisher = xlate('publisher')
md.language = xlate('language')
md.format = xlate('format')
md.pageCount = xlate('pages')
md.maturityRating = xlate('rating')
md.price = xlate('price')
md.isVersionOf = xlate('isVersionOf')
md.rights = xlate('rights')
md.identifier = xlate('identifier')
md.lastMark = xlate('lastMark')
md.genre = xlate('genre') # TODO - repeatable field
date = xlate('date')
if date is not None:
parts = date.split('-')
if len(parts) > 0:
md.year = parts[0]
if len(parts) > 1:
md.month = parts[1]
md.coverImage = xlate('coverImage')
readingDirection = xlate('readingDirection')
if readingDirection is not None and readingDirection == "rtl":
md.manga = "YesAndRightToLeft"
# loop for character tags
char_list = []
for n in root:
if n.tag == 'character':
char_list.append(n.text.strip())
md.characters = utils.listToString(char_list)
# Now extract the credit info
for n in root:
if (n.tag == 'writer' or
n.tag == 'penciller' or
n.tag == 'inker' or
n.tag == 'colorist' or
n.tag == 'letterer' or
n.tag == 'editor'
):
metadata.addCredit(n.text.strip(), n.tag.title())
if n.tag == 'coverDesigner':
metadata.addCredit(n.text.strip(), "Cover")
metadata.isEmpty = False
return metadata
# verify that the string actually contains CoMet data in XML format
def validateString(self, string):
try:
tree = ET.ElementTree(ET.fromstring(string))
root = tree.getroot()
if root.tag != 'comet':
raise Exception
except:
return False
return True
def writeToExternalFile(self, filename, metadata):
tree = self.convertMetadataToXML(self, metadata)
# ET.dump(tree)
tree.write(filename, encoding='utf-8')
def readFromExternalFile(self, filename):
tree = ET.parse(filename)
return self.convertXMLToMetadata(tree)
| gpl-3.0 | 3,675,850,606,253,734,400 | 32.304348 | 97 | 0.546997 | false | 4.081705 | false | false | false |
| ArcaniteSolutions/truffe2 | truffe2/vehicles/models.py | 2 | 13930 |
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django import forms
from django.shortcuts import get_object_or_404
from generic.models import GenericModel, GenericStateModel, FalseFK, GenericGroupsModel, GenericStateRootValidable, GenericGroupsModerableModel, GenericContactableModel, SearchableModel
from rights.utils import AgepolyEditableModel, UnitEditableModel
from users.models import TruffeUser
class _Provider(GenericModel, AgepolyEditableModel, SearchableModel):
class MetaRightsAgepoly(AgepolyEditableModel.MetaRightsAgepoly):
access = ['LOGISTIQUE', 'SECRETARIAT']
world_ro_access = False
name = models.CharField(_('Nom'), max_length=255)
description = models.TextField(_('Description'))
class MetaData:
list_display = [
('name', _(u'Nom')),
]
details_display = list_display + [
('description', _(u'Description')),
]
default_sort = "[1, 'asc']" # name
filter_fields = ('name', 'description')
base_title = _(u'Fournisseurs')
list_title = _(u'Liste des fournisseurs')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-suitcase'
menu_id = 'menu-vehicles-provider'
help_list = _(u"""Les entreprises fournissant des services de locations de véhicules.""")
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u'mobility véhicule'
fields = [
'name',
'description',
]
class Meta:
abstract = True
def __unicode__(self):
return self.name
def get_types(self):
return self.vehicletype_set.filter(deleted=False).order_by('name')
def get_cards(self):
return self.card_set.filter(deleted=False).order_by('name')
class _VehicleType(GenericModel, AgepolyEditableModel, SearchableModel):
class MetaRightsAgepoly(AgepolyEditableModel.MetaRightsAgepoly):
access = ['LOGISTIQUE', 'SECRETARIAT']
world_ro_access = False
provider = FalseFK('vehicles.models.Provider', verbose_name=_('Fournisseur'))
name = models.CharField(_('Nom'), max_length=255)
description = models.TextField(_('Description'))
class MetaData:
list_display = [
('name', _(u'Nom')),
('provider', _(u'Fournisseur')),
]
details_display = list_display + [
('description', _(u'Description')),
]
default_sort = "[1, 'asc']" # name
filter_fields = ('name', 'description', 'provider__name')
base_title = _(u'Types de véhicule')
list_title = _(u'Liste des types de véhicules')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-truck'
menu_id = 'menu-vehicles-type'
help_list = _(u"""Les différents types de véhicules, par fournisseur""")
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u'mobility véhicule'
fields = [
'name',
'description',
'provider',
]
class Meta:
abstract = True
def __unicode__(self):
return self.name
class _Card(GenericModel, AgepolyEditableModel, SearchableModel):
class MetaRightsAgepoly(AgepolyEditableModel.MetaRightsAgepoly):
access = ['LOGISTIQUE', 'SECRETARIAT']
world_ro_access = False
provider = FalseFK('vehicles.models.Provider', verbose_name=_('Fournisseur'))
name = models.CharField(_('Nom'), max_length=255)
number = models.CharField(_(u'Numéro'), max_length=255)
description = models.TextField(_('Description'))
exclusif = models.BooleanField(_('Usage exclusif'), default=True, help_text=_(u'Ne peut pas être utilisé plusieurs fois en même temps ?'))
class MetaData:
list_display = [
('name', _(u'Nom')),
('provider', _(u'Fournisseur')),
('number', _(u'Numéro')),
]
details_display = list_display + [
('description', _(u'Description')),
('exclusif', _(u'Usage exclusif'))
]
default_sort = "[1, 'asc']" # name
yes_or_no_fields = ['exclusif']
filter_fields = ('name', 'number', 'description', 'provider__name')
base_title = _(u'Cartes')
list_title = _(u'Liste des cartes')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-credit-card'
menu_id = 'menu-vehicles-cards'
help_list = _(u"""Les différentes cartes utilisées pour les réservations""")
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u'mobility véhicule'
fields = [
'name',
'description',
'provider',
'number',
]
class Meta:
abstract = True
def __unicode__(self):
return u'{} ({})'.format(self.name, self.number)
class _Location(GenericModel, AgepolyEditableModel, SearchableModel):
class MetaRightsAgepoly(AgepolyEditableModel.MetaRightsAgepoly):
access = ['LOGISTIQUE', 'SECRETARIAT']
world_ro_access = False
name = models.CharField(_('Nom'), max_length=255)
description = models.TextField(_('Description'))
url_location = models.URLField(_('URL carte lieu'), blank=True, null=True)
class MetaData:
list_display = [
('name', _(u'Nom')),
]
details_display = list_display + [
('description', _(u'Description')),
('url_location', _(u'URL carte lieu')),
]
default_sort = "[1, 'asc']" # name
filter_fields = ('name', 'description')
base_title = _(u'Lieux')
list_title = _(u'Liste des lieux')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-location-arrow'
menu_id = 'menu-vehicles-location'
help_list = _(u"""Les lieux de récupération des locations""")
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u'mobility véhicule'
fields = [
'name',
'description',
]
class Meta:
abstract = True
def __unicode__(self):
return self.name
class _Booking(GenericModel, GenericGroupsModerableModel, GenericGroupsModel, GenericContactableModel, GenericStateRootValidable, GenericStateModel, UnitEditableModel, SearchableModel):
class MetaRightsUnit(UnitEditableModel.MetaRightsUnit):
access = 'LOGISTIQUE'
moderation_access = 'SECRETARIAT'
unit = FalseFK('units.models.Unit')
title = models.CharField(_(u'Titre'), max_length=255)
responsible = models.ForeignKey(TruffeUser, verbose_name=_(u'Responsable'))
reason = models.TextField(_(u'Motif'))
remark = models.TextField(_(u'Remarques'), blank=True, null=True)
remark_agepoly = models.TextField(_(u'Remarques AGEPoly'), blank=True, null=True)
provider = FalseFK('vehicles.models.Provider', verbose_name=_(u'Fournisseur'))
vehicletype = FalseFK('vehicles.models.VehicleType', verbose_name=_(u'Type de véhicule'))
card = FalseFK('vehicles.models.Card', verbose_name=_(u'Carte'), blank=True, null=True)
location = FalseFK('vehicles.models.Location', verbose_name=_(u'Lieu'), blank=True, null=True)
start_date = models.DateTimeField(_(u'Début de la réservation'))
end_date = models.DateTimeField(_(u'Fin de la réservation'))
class MetaData:
list_display = [
('title', _('Titre')),
('start_date', _(u'Date début')),
('end_date', _('Date fin')),
('provider', _('Fournisseur')),
('vehicletype', _(u'Type de véhicule')),
('status', _('Statut')),
]
details_display = list_display + [
('responsible', _('Responsable')),
('reason', _('Motif')),
('remark', _('Remarques')),
('remark_agepoly', _('Remarques AGEPoly')),
('card', _('Carte')),
('get_location', _('Lieu')),
]
filter_fields = ('title', 'status')
base_title = _(u'Réservations de véhicule')
list_title = _(u'Liste de toutes les réservations de véhicules')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-ambulance'
default_sort = "[3, 'desc']" # end_date
forced_widths = {
'1': '25%',
'2': '140px', # start date
'3': '140px', # end date
}
forced_widths_related = {
'1': '15%',
'2': '25%',
'4': '150px', # start date
'5': '150px', # end date
}
menu_id = 'menu-vehicles-booking'
menu_id_calendar = 'menu-vehicles-booking-calendar'
menu_id_calendar_related = 'menu-vehicles-booking-calendar-related'
datetime_fields = ['start_date', 'end_date']
safe_fields = ['get_location']
has_unit = True
help_list = _(u"""Les réservations de véhicules te permettent de demander la location d'un véhicule pour ton unité.
Ils sont soumis à validation par le secrétariat de l'AGEPoly. Il faut toujours faire les réservations le plus tôt possible !""")
help_list_related = _(u"""La liste de toutes les réservations de véhicules.""")
@staticmethod
def extra_args_for_edit(request, current_unit, current_year):
from vehicles.models import Provider
return {'providers': Provider.objects.filter(deleted=False).order_by('name')}
class MetaEdit:
datetime_fields = ('start_date', 'end_date')
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u'mobility véhicule réservation'
fields = [
'title',
'card',
'provider',
'location',
'vehicletype',
'responsible',
'remark',
'reason',
'remark_agepoly',
]
class MetaState(GenericStateRootValidable.MetaState):
states_texts = {
'0_draft': _(u'La réservation est en cours de création et n\'est pas publique.'),
'1_asking': _(u'La réservation est en cours de modération. Elle n\'est pas éditable. Sélectionner ce statut pour demander une modération !'),
            '2_online': _(u'La réservation est validée. Elle n\'est pas éditable.'),
'3_archive': _(u'La réservation est archivée. Elle n\'est plus modifiable.'),
'4_deny': _(u'La modération a été refusée. Le véhicule n\'était probablement pas disponible.'),
}
def build_form_validation(request, obj):
from vehicles.models import Location
class FormValidation(forms.Form):
remark_agepoly = forms.CharField(label=_('Remarque'), widget=forms.Textarea, required=False)
card = forms.ModelChoiceField(label=_(u'Carte'), queryset=obj.provider.get_cards(), required=False)
location = forms.ModelChoiceField(label=_(u'Lieu'), queryset=Location.objects.filter(deleted=False).order_by('name'), required=False)
return FormValidation
states_bonus_form = {
'2_online': build_form_validation
}
def switch_status_signal(self, request, old_status, dest_status):
from vehicles.models import Location, Card
if dest_status == '2_online':
if request.POST.get('remark_agepoly'):
if self.remark_agepoly:
self.remark_agepoly += '\n' + request.POST.get('remark_agepoly')
else:
self.remark_agepoly = request.POST.get('remark_agepoly')
self.save()
if request.POST.get('card'):
self.card = get_object_or_404(Card, pk=request.POST.get('card'), provider=self.provider, deleted=False)
self.save()
if request.POST.get('location'):
self.location = get_object_or_404(Location, pk=request.POST.get('location'), deleted=False)
self.save()
s = super(_Booking, self)
if hasattr(s, 'switch_status_signal'):
s.switch_status_signal(request, old_status, dest_status)
class Meta:
abstract = True
def __unicode__(self):
return self.title
def get_location(self):
if self.location:
if self.location.url_location:
return u'<a href="{}">{}</a>'.format(self.location.url_location, self.location)
else:
return self.location.__unicode__()
else:
return ''
def genericFormExtraInit(self, form, current_user, *args, **kwargs):
"""Remove fields that should be edited by SECRETARIAT CDD only."""
if not self.rights_in_root_unit(current_user, 'SECRETARIAT'):
del form.fields['card']
del form.fields['location']
del form.fields['remark_agepoly']
unit_users_pk = map(lambda user: user.pk, self.unit.users_with_access())
form.fields['responsible'].queryset = TruffeUser.objects.filter(pk__in=unit_users_pk).order_by('first_name', 'last_name')
def genericFormExtraClean(self, data, form):
if 'provider' in data:
if 'card' in data and data['card']:
if data['card'].provider != data['provider']:
raise forms.ValidationError(_(u'La carte n\'est pas lié au fournisseur sélectionné'))
            if 'vehicletype' in data and data['vehicletype']:
                if data['vehicletype'].provider != data['provider']:
raise forms.ValidationError(_(u'Le type de véhicule n\'est pas lié au fournisseur sélectionné'))
def conflicting_reservation(self):
return self.__class__.objects.exclude(pk=self.pk, deleted=True).filter(status__in=['2_online'], end_date__gt=self.start_date, start_date__lt=self.end_date)
| bsd-2-clause | -4,125,093,519,237,388,300 | 33.147783 | 185 | 0.59247 | false | 3.675504 | false | false | false |
| jonnybazookatone/ADSimportpipeline | lib/conversions.py | 1 | 1831 |
#!/usr/bin/env python
import ads
from ads.Looker import Looker
class ConvertBibcodes:
def __init__(self):
self.bib2alt = Looker(ads.alternates).look
self.bib2epr = Looker(ads.pub2arx).look
self.alt2bib = Looker(ads.altlist).look
self.epr2bib = Looker(ads.ematches).look
def getAlternates(self,bbc):
"""
Returns a list of alternate bibcodes for a record.
"""
if isinstance(bbc, list):
bibcode = bbc[0].strip()
else:
bibcode = bbc.strip()
alternates = []
res = self.bib2alt(bibcode).strip()
rez = self.bib2epr(bibcode).strip()
if res:
for line in res.split('\n'):
alternate = line.split('\t')[1]
if alternate != bibcode:
alternates.append(alternate)
if rez:
alternates.append(rez.strip().split('\n')[0].split('\t')[1])
return alternates
def Canonicalize(self,biblist,remove_matches=False):
"""
Convert a list of bibcodes into a list of canonical
bibcodes (canonical bibcodes remain unchanged).
Setting 'remove_matches' to True will remove e-print
bibcodes that have been matched
"""
if isinstance(biblist, str):
biblist = [biblist]
newlist = []
for bibcode in biblist:
res = self.alt2bib(bibcode).strip()
rez = self.epr2bib(bibcode).strip()
if res:
bibcode = res.strip().split('\n')[0].split('\t')[1]
elif rez and remove_matches:
bibcode = ''
elif rez:
bibcode = rez.strip().split('\n')[0].split('\t')[1]
if bibcode:
newlist.append(bibcode)
return list(set(newlist))
| gpl-3.0 | -3,548,930,736,092,136,400 | 32.290909 | 72 | 0.541234 | false | 3.830544 | false | false | false |
| nickraptis/fidibot | src/modules/help.py | 1 | 2615 |
# Author: Nick Raptis <airscorp@gmail.com>
"""
Module for listing commands and help.
"""
from basemodule import BaseModule, BaseCommandContext
from alternatives import _
class HelpContext(BaseCommandContext):
def cmd_list(self, argument):
"""List commands"""
arg = argument.lower()
index = self.bot.help_index
public = "public commands -- %s" % " ".join(index['public'])
private = "private commands -- %s" % " ".join(index['private'])
if 'all' in arg or 'both' in arg:
output = "\n".join((public, private))
elif 'pub' in arg or self.target.startswith('#'):
output = public
elif 'priv' in arg or not self.target.startswith('#'):
output = private
else:
# we shouldn't be here
self.logger.error("cmd_list")
return
self.send(self.target, output)
def cmd_modules(self, argument):
"""List active modules"""
index = self.bot.help_index
output = "active modules -- %s" % " ".join(index['modules'].keys())
self.send(self.target, output)
def cmd_help(self, argument):
"""Get help on a command or module"""
arg = argument.lower()
index = self.bot.help_index
target = self.target
args = arg.split()
if not args:
s = "usage: help <command> [public|private] / help module <module>"
self.send(target, s)
elif args[0] == 'module':
args.pop(0)
if not args:
self.send(target, "usage: help module <module>")
else:
help_item = index['modules'].get(args[0])
if help_item:
self.send(target, help_item['summary'])
else:
self.send(target, _("No help for %s"), args[0])
else:
args.append("")
cmd = args.pop(0)
cmd_type = args.pop(0)
if 'pu' in cmd_type or self.target.startswith('#'):
cmd_type = 'public'
elif 'pr' in cmd_type or not self.target.startswith('#'):
cmd_type = 'private'
else:
# we shouldn't be here
self.logger.error("cmd_list")
return
help_item = index[cmd_type].get(cmd)
if help_item:
self.send(target, index[cmd_type][cmd]['summary'])
else:
self.send(target, _("No help for %s"), cmd)
class HelpModule(BaseModule):
context_class = HelpContext
module = HelpModule
| bsd-2-clause | -5,116,395,268,455,257,000 | 32.961039 | 79 | 0.521606 | false | 4.035494 | false | false | false |
| google-research/leaf-audio | example/train.py | 1 | 2594 |
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training loop using the LEAF frontend."""
import os
from typing import Optional
import gin
from leaf_audio import models
from example import data
import tensorflow as tf
import tensorflow_datasets as tfds
@gin.configurable
def train(workdir: str = '/tmp/',
dataset: str = 'speech_commands',
num_epochs: int = 10,
steps_per_epoch: Optional[int] = None,
learning_rate: float = 1e-4,
batch_size: int = 64,
**kwargs):
"""Trains a model on a dataset.
Args:
workdir: where to store the checkpoints and metrics.
    dataset: name of a tensorflow_datasets audio dataset.
    num_epochs: number of epochs to train the model for.
steps_per_epoch: number of steps that define an epoch. If None, an epoch is
a pass over the entire training set.
learning_rate: Adam's learning rate.
batch_size: size of the mini-batches.
**kwargs: arguments to the models.AudioClassifier class, namely the encoder
and the frontend models (tf.keras.Model).
"""
datasets, info = tfds.load(dataset, with_info=True)
datasets = data.prepare(datasets, batch_size=batch_size)
num_classes = info.features['label'].num_classes
model = models.AudioClassifier(num_outputs=num_classes, **kwargs)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = 'sparse_categorical_accuracy'
model.compile(loss=loss_fn,
optimizer=tf.keras.optimizers.Adam(learning_rate),
metrics=[metric])
ckpt_path = os.path.join(workdir, 'checkpoint')
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=ckpt_path,
save_weights_only=True,
monitor=f'val_{metric}',
mode='max',
save_best_only=True)
model.fit(datasets['train'],
validation_data=datasets['eval'],
batch_size=None,
epochs=num_epochs,
steps_per_epoch=steps_per_epoch,
callbacks=[model_checkpoint_callback])
| apache-2.0 | 7,620,625,562,801,573,000 | 35.027778 | 79 | 0.692367 | false | 3.877429 | false | false | false |
| moagstar/xsorted | docs/conf.py | 1 | 8799 |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- Hack for ReadTheDocs ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
output_dir = os.path.join(__location__, "../docs/api")
module_dir = os.path.join(__location__, "../xsorted")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'xsorted'
copyright = u'2017, Daniel Bradburn'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from xsorted import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'xsorted-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'xsorted Documentation',
u'Daniel Bradburn', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| mit | -1,019,507,065,598,080,300 | 33.778656 | 85 | 0.690533 | false | 3.689308 | false | false | false |
| buckets1337/MotherMUD | Objects.py | 1 | 25773 |
# Objects.py
"""
This file defines the various different objects that can be found in the world
"""
#--------------------------
# This file defines the objects in the world. Best practice for naming an object follows: <region><Room><Name>, where the region is lowercase and every other word in the smashed string is capitalized
#--------------------------
import os
import World
import Globals
indexList = []
fromFileList = Globals.fromFileList
equipmentFromFile = Globals.equipmentFromFile
fileList = []
eqFileList = []
savedEqFileList = []
for region in Globals.RegionsList:
directoryFiles = os.listdir('blueprints/obj/'+str(region)+'/')
eqDirectoryFiles = os.listdir('blueprints/equip/'+str(region)+'/')
if os.path.exists('data/world/' + str(region) + '/equip/'):
savedEqFiles = os.listdir('data/world/' + str(region) + '/equip/')
else:
savedEqFiles = []
for obj in directoryFiles:
path = str(region)+'/'+obj
fileList.append(path)
for obj in eqDirectoryFiles:
path = str(region)+'/'+obj
eqFileList.append(path)
for obj in savedEqFiles:
path = str(region) + '/equip/' + obj
savedEqFileList.append(path)
def setLocation(location):
global room
room = location
def loadSavedEq():
'''
loads equipment into rooms from equipment definition files after server restart
'''
for region in Globals.regionListDict:
for room in Globals.regionListDict[region]:
path='data/world/' + region + '/equip/' + room + '/'
#shortPath='data/world' + region + '/equip/'
if os.path.exists(path):
Globals.regionListDict[region][room].equipment = {}
dirList = os.listdir(path)
for eqFile in dirList:
if not eqFile.endswith('~'):
newEq = buildEquipmentFromFile(eqFile, path)
# Globals.regionListDict[region][room].items.append(newEq)
def saveEq():
'''
    handles saving all equipment in the world (but not player equipment) into unique equipment definition files when the server is shut down
'''
for region in Globals.regionListDict:
for room in Globals.regionListDict[region]:
path='data/world/'+region+'/equip/'+room+'/'
shortPath='data/world/'+region+'/equip/'
if not os.path.exists(shortPath):
os.makedirs(shortPath)
if not os.path.exists(path):
os.makedirs(path)
dirList = os.listdir(path)
for eqFile in dirList:
print eqFile
os.remove(path+eqFile)
for eq in Globals.regionListDict[region][room].equipment:
saveEqToFile(eq, path)
def saveEqToFile(eq, path):
'''
handles saving a single bit of equipment to a unique equipment definition file when the server is shutdown
'''
eqType = ''
if hasattr(eq.kind.equipment, 'weapon'):
if eq.kind.equipment.weapon != None:
eqType = 'weapon'
if hasattr(eq.kind.equipment, 'armor'):
if eq.kind.equipment.armor != None:
eqType = 'armor'
battleCommands = []
if eq.kind.equipment.battleCommands != [''] and eq.kind.equipment.battleCommands != []:
for command in eq.kind.equipment.battleCommands:
battleCommands.append(command)
if battleCommands == []:
battleCommands = ''
itemGrabHandler = 'False'
if hasattr(eq.kind, 'itemGrabHandler'):
if eq.kind.itemGrabHandler != None:
itemGrabHandler = 'True'
notDroppable = 'False'
if hasattr(eq.kind, 'itemGrabHandler') and eq.kind.itemGrabHandler != None:
if eq.kind.itemGrabHandler.notDroppable:
notDroppable = 'True'
objectSpawner = 'False'
if hasattr(eq.kind, 'objectSpawner'):
if eq.kind.objectSpawner != None:
objectSpawner = 'True'
filePath = path + str(eq)
with open(filePath, 'w') as f:
f.write('ID=%s\n' %str(eq))
f.write('currentRoom=%s\n' %(str(eq.currentRoom.region)+ ":" +str(eq.currentRoom.name)))
f.write('\n')
f.write('name=%s\n' %eq.name)
f.write('type=%s\n' %eqType)
f.write('slot=%s\n' %eq.kind.equipment.slot)
f.write('\n')
f.write('durability=%s\n' %eq.kind.equipment.durability)
f.write('maxDurability=%s\n' %eq.kind.equipment.maxDurability)
f.write('worth=%s\n' %eq.kind.equipment.worth)
f.write('\n')
f.write('description=%s\n' %eq.description)
f.write('\n')
f.write('longDescription=%s\n' %eq.longDescription)
f.write('\n')
f.write('isVisible=%s\n' %eq.isVisible)
f.write('\n')
f.write('hp=%s\n' %eq.kind.equipment.hp)
f.write('pp=%s\n' %eq.kind.equipment.pp)
f.write('offense=%s\n' %eq.kind.equipment.offense)
f.write('defense=%s\n' %eq.kind.equipment.defense)
f.write('speed=%s\n' %eq.kind.equipment.speed)
f.write('guts=%s\n' %eq.kind.equipment.guts)
f.write('luck=%s\n' %eq.kind.equipment.luck)
f.write('vitality=%s\n' %eq.kind.equipment.vitality)
f.write('IQ=%s\n' %eq.kind.equipment.IQ)
f.write('\n')
f.write('battleCommands=%s\n' %battleCommands)
f.write('\n')
f.write('statusEffect=%s\n' %eq.kind.equipment.statusEffect)
f.write('\n')
f.write('onUse=%s\n' %eq.kind.equipment.onUse)
f.write('\n\n')
f.write('kind.isCarryable=%s\n' %eq.kind.isCarryable)
f.write('kind.respawns=%s\n' %eq.kind.respawns)
f.write('\n')
f.write('kind.itemGrabHandler=%s\n' %itemGrabHandler)
if itemGrabHandler == 'True':
f.write('kind.itemGrabHandler.notDroppable=%s\n' %notDroppable)
f.write('\n')
f.write('kind.objectSpawner=%s\n' %objectSpawner)
if objectSpawner == 'True':
f.write('kind.objectSpawner.time=%s\n' %eq.kind.objectSpawner.time)
f.write('kind.objectSpawner.spawnOdds=%s\n' %eq.kind.objectSpawner.spawnOdds)
f.write('kind.objectSpawner.container=%s\n' %eq.kind.objectSpawner.container)
f.write('kind.objectSpawner.cycles=%s\n' %eq.kind.objectSpawner.cycles)
f.write('kind.objectSpawner.repeat=%s\n' %eq.kind.objectSpawner.repeat)
f.write('kind.objectSpawner.active=%s\n' %eq.kind.objectSpawner.active)
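# Illustrative example (added; the values are made up) of a file produced by
# saveEqToFile() above and read back by buildEquipmentFromFile() below, with
# one key=value pair per line mirroring the f.write() calls:
#
#   name=rusty sword
#   type=weapon
#   slot=hand
#   durability=8
#   maxDurability=10
#   worth=5
#   description=a rusty old sword
#   offense=3
#   kind.isCarryable=True
#   kind.respawns=False
#   kind.itemGrabHandler=False
#   kind.objectSpawner=False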
def buildObjectFromFile(file):
'''
creates an object by constructing it out of details in a file
'''
print file
if str(file).endswith('~'):
print "\n"
return
path = 'blueprints/obj/' + file
with open(path, 'r') as f:
fileData = f.readlines()
newObject = World.Object('none', 'none')
print fileData
kind = None
isCarryable = None
isVisible = None
isLocked = False
respawns = None
objectSpawner = None
itemGrabHandler = None
repeat = None
time = None
spawnOdds = None
container = None
cycles = None
repeat = None
active = None
notDroppable = None
objectSpawnerComponent = None
itemGrabHandlerComponent = None
itemComponent = None
mobActive = None
mobCycles = None
mobMode = None
mobSpawnOdds = None
mobTime = None
mobFile = None
mobSpawner = None
onUse = None
for Data in fileData:
if Data.startswith('name='):
newObject.name = Data[6:-2]
if Data.startswith('description='):
newObject.description = Data[13:-2]
if Data.startswith('longDescription='):
newObject.longDescription = Data[17:-2]
if Data.startswith('isVisible='):
text = Data[10:-1]
if text == 'True':
newObject.isVisible = True
elif text == 'False':
newObject.isVisible = False
if Data.startswith('kind='):
text = Data[5:-1]
#print "kind:" + text
if text == 'item':
kind = 'item'
elif text == 'container':
kind = 'container'
if Data.startswith('kind.isCarryable='):
text = Data[17:-1]
#print "isCarryable:" +text
if text == 'True':
isCarryable = True
elif text == 'False':
isCarryable = False
if Data.startswith('kind.respawns='):
text = Data[14:-1]
if text == 'True':
respawns = True
elif text == 'False':
respawns = False
if Data.startswith('kind.isLocked='):
text = Data[14:-1]
if text == 'True':
isLocked = True
if text == 'False':
isLocked = False
if Data.startswith('kind.respawnContents='):
text = Data[21:-1]
if text == 'True':
respawnContents = True
elif text == 'False':
respawnContents = False
if Data.startswith('kind.objectSpawner='):
text = Data[19:-1]
if text == 'True':
objectSpawner = True
elif text == 'False':
objectSpawner = False
if Data.startswith('kind.objectSpawner.time='):
time = int(Data[24:-1])
if Data.startswith('kind.objectSpawner.spawnOdds='):
text = Data[29:-1]
oddsList = text.split(',')
#print "oddsList:" + str(oddsList)
nestedOddsList = []
for odds in oddsList:
nestedOddsList.append(odds.split(':'))
for oddsEntry in nestedOddsList:
oddsEntry[1] = int(oddsEntry[1])
if oddsEntry[0] == 'True':
oddsEntry[0] = True
elif oddsEntry[0] == 'False':
oddsEntry[0] = False
#print nestedOddsList
spawnOdds = nestedOddsList
if Data.startswith('kind.objectSpawner.container='):
text = Data[29:-1]
if text == 'None':
container = None
else:
container = text[1:-1] # this should be a reference to another object
container = container.split(', ')
if Data.startswith('kind.objectSpawner.cycles='):
cycles = int(Data[26:-1])
if Data.startswith('kind.objectSpawner.repeat='):
text = Data[26:-1]
if text == 'True':
repeat = True
elif text == 'False':
repeat = False
if Data.startswith('kind.objectSpawner.active='):
text = Data[26:-1]
#print "***active:" + text
if text == 'True':
active = True
elif text == 'False':
active = False
if Data.startswith('kind.itemGrabHandler='):
text = Data[21:-1]
#print "itemGrabHandler:" +text
if text == 'True':
itemGrabHandler = True
elif text == 'False':
itemGrabHandler = False
if Data.startswith('kind.itemGrabHandler.notDroppable='):
text = Data[34:-1]
#print "*** notDroppabletext:" + text
if text == 'True':
notDroppable = True
elif text == 'False':
notDroppable = False
if Data.startswith('kind.onUse='):
text = Data[11:-1]
onUse = text
if Data.startswith('mobSpawner='):
text = Data[11:-1]
if text == 'True':
mobSpawner = True
elif text == 'False':
mobSpawner = False
if Data.startswith('mobSpawner.mobFile='):
text = Data[19:-1]
mobFile = text
if Data.startswith('mobSpawner.time='):
text = Data[16:-1]
mobTime = int(text)
if Data.startswith('mobSpawner.oddsList='):
text = Data[20:-1]
oddsList = text.split(',')
#print "oddsList:" + str(oddsList)
nestedOddsList = []
for odds in oddsList:
nestedOddsList.append(odds.split(':'))
for oddsEntry in nestedOddsList:
oddsEntry[1] = int(oddsEntry[1])
if oddsEntry[0] == 'True':
oddsEntry[0] = True
elif oddsEntry[0] == 'False':
oddsEntry[0] = False
#print nestedOddsList
mobSpawnOdds = nestedOddsList
if Data.startswith('mobSpawner.mode='):
text = Data[16:-1]
print "mobModeff:" + text
mobMode = text
if Data.startswith('mobSpawner.cycles='):
text = Data[18:-1]
mobCycles = int(text)
if Data.startswith('mobSpawner.active='):
text = Data[18:-1]
if text == 'True':
mobActive = True
elif text == 'False':
mobActive = False
#print kind
if kind == 'item':
# print itemGrabHandler
# print objectSpawnerComponent
# print isCarryable
itemComponent = World.item()
itemComponent.owner = newObject
if kind == 'container':
itemComponent = World.container(inventory=[])
itemComponent.owner = newObject
if objectSpawner:
objectSpawnerComponent = World.objectSpawner(itemComponent, Globals.TIMERS, time, newObject, spawnOdds, container, cycles, repeat, active)
else:
objectSpawnerComponent = None
if itemGrabHandler:
itemGrabHandlerComponent = World.itemGrabHandler(notDroppable)
else:
itemGrabHandlerComponent = None
if mobSpawner:
mobFileMod = mobFile.split("/")
# print mobFileMod
# print Globals.mobsFromFile
# for mob in Globals.mobsFromFile:
# if mob.name == mobFileMod[1]:
# mobref = mob
#print mobMode
mobSpawnerComponent = World.mobSpawner(newObject, Globals.TIMERS, mobTime, mobFileMod[1], mobSpawnOdds, mobCycles, mode=mobMode, active=mobActive)
else:
mobSpawnerComponent = None
#print kind
if kind == 'item':
# print itemGrabHandler
# print objectSpawnerComponent
#print isCarryable
itemComponent.isCarryable = isCarryable
itemComponent.respawns = respawns
itemComponent.itemGrabHandler = itemGrabHandlerComponent
itemComponent.objectSpawner = objectSpawnerComponent
itemComponent.onUse = onUse
#itemComponent = World.item(isCarryable, respawns, itemGrabHandlerComponent, objectSpawnerComponent)
if kind == 'container':
itemComponent.isLocked = isLocked
itemComponent.isCarryable = isCarryable
itemComponent.respawns = respawns
itemComponent.respawnContents = respawnContents
itemComponent.itemGrabHandler = itemGrabHandlerComponent
itemComponent.objectSpawner = objectSpawnerComponent
itemComponent.inventory = []
#itemComponent = World.container(isLocked, isCarryable, respawns, respawnContents, itemGrabHandlerComponent, objectSpawnerComponent)
#print newObject.name
if kind is not None:
newObject.kind = itemComponent
if mobSpawner:
newObject.mobSpawner = mobSpawnerComponent
else:
newObject.mobSpawner = None
#print newObject.kind
fromFileList.append(newObject)
# printing suite
print "name:" + str(newObject.name)
print "description:" + str(newObject.description)
print "currentRoom:" + str(newObject.currentRoom)
print "isVisible:" + str(newObject.isVisible)
print "spawnContainer:" + str(newObject.spawnContainer)
print "longDescription:" + str(newObject.longDescription)
print "kind:" + str(newObject.kind)
print "TIMERS:" + str(newObject.TIMERS)
if newObject.kind is not None:
if isinstance(newObject.kind, World.item):
print "kind.isCarryable:" + str(newObject.kind.isCarryable)
print "kind.respawns:" + str(newObject.kind.respawns)
print "kind.itemGrabHandler:" + str(newObject.kind.itemGrabHandler)
print "kind.objectSpawner:" + str(newObject.kind.objectSpawner)
print "kind.onUse:" + str(newObject.kind.onUse)
if isinstance(newObject.kind, World.container):
print "kind.inventory:" + str(newObject.kind.inventory)
print "kind.isLocked:" + str(newObject.kind.isLocked)
print "kind.isCarryable:" + str(newObject.kind.isCarryable)
print "kind.respawns:" + str(newObject.kind.respawns)
print "kind.respawnContents:" + str(newObject.kind.respawnContents)
print "kind.itemGrabHandler:" + str(newObject.kind.itemGrabHandler)
print "kind.objectSpawner:" + str(newObject.kind.objectSpawner)
if newObject.kind.itemGrabHandler is not None:
print "kind.itemGrabHandler.notDroppable:" + str(newObject.kind.itemGrabHandler.notDroppable)
if newObject.kind.objectSpawner is not None:
print "kind.objectSpawner.owner:" + str(newObject.kind.objectSpawner.owner)
print "kind.objectSpawner.TIMERS:" + str(newObject.kind.objectSpawner.TIMERS)
print "kind.objectSpawner.time:" + str(newObject.kind.objectSpawner.time)
print "kind.objectSpawner.obj:" + str(newObject.kind.objectSpawner.obj)
print "kind.objectSpawner.oddsList:" + str(newObject.kind.objectSpawner.oddsList)
print "kind.objectSpawner.container:" + str(newObject.kind.objectSpawner.container)
print "kind.objectSpanwer.cycles:" + str(newObject.kind.objectSpawner.cycles)
print "kind.objectSpawner.repeat:" + str(newObject.kind.objectSpawner.repeat)
print "kind.objectSpawner.active:" + str(newObject.kind.objectSpawner.active)
print "kind.objectSpawner.timer:" + str(newObject.kind.objectSpawner.timer)
print "kind.objectSpawner.startingLocation:" + str(newObject.kind.objectSpawner.startingLocation)
print "mobSpawner:" + str(newObject.mobSpawner)
if newObject.mobSpawner is not None:
#print "mobSpawner.mobFile:" + str(newObject.mobSpawner.mobFile)
print "mobSpawner.time:" + str(newObject.mobSpawner.time)
print "mobSpawner.oddsList:" + str(newObject.mobSpawner.oddsList)
print "mobSpawner.mode:" + str(newObject.mobSpawner.mode)
print "mobSpawner.cycles:" + str(newObject.mobSpawner.cycles)
print "mobSpawner.active:" + str(newObject.mobSpawner.active)
print "\n"
def buildEquipmentFromFile(file, location):
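    '''
    creates a piece of equipment by constructing it out of details in an equipment definition file at the given location
    '''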
print file
if str(file).endswith('~'):
print "\n"
return
path = location + file
with open(path, 'r') as f:
fileData = f.readlines()
newWeapon = None
newArmor = None
equipmentType = None
slot = None
durability = None
maxDurability = None
worth = None
description = None
longDescription = None
hp = None
pp = None
offense = None
defense = None
speed = None
guts = None
luck = None
vitality = None
IQ = None
battleCommands = None
statusEffect = None
onUse = None
isVisible = None
spawnContainer = None
isCarryable = None
respawns = None
itemGrabHandler = None
objectSpawner = None
notDroppable = None
container = None
spawnOdds = None
time = None
active = None
repeat = None
cycles = None
for Data in fileData:
if Data.startswith('type='):
equipmentType = Data[5:-1]
if Data.startswith('ID='):
ID = Data[3:-1]
if Data.startswith('name='):
name = Data[5:-1]
if Data.startswith('slot='):
slot = Data[5:-1]
if Data.startswith('durability='):
durability = Data[11:-1]
if Data.startswith('maxDurability='):
maxDurability = Data[14:-1]
if Data.startswith('worth='):
worth = Data[6:-1]
if Data.startswith('description='):
description = Data[12:-1]
if Data.startswith('longDescription='):
longDescription = Data[16:-1]
if Data.startswith('hp='):
hp = int(Data[3:-1])
if Data.startswith('pp='):
pp = int(Data[3:-1])
if Data.startswith('offense='):
offense = int(Data[8:-1])
if Data.startswith('defense='):
defense = int(Data[8:-1])
if Data.startswith('speed='):
speed = int(Data[6:-1])
if Data.startswith('guts='):
guts = int(Data[5:-1])
if Data.startswith('luck='):
luck = int(Data[5:-1])
if Data.startswith('vitality='):
vitality = int(Data[9:-1])
if Data.startswith('IQ='):
IQ = int(Data[3:-1])
if Data.startswith('battleCommands='):
battleCommands = Data[15:-1]
battleCommands = battleCommands.split(",")
if Data.startswith('statusEffect='):
statusEffect = Data[13:-1]
if Data.startswith('onUse='):
onUse = Data[6:-1]
if Data.startswith('isVisible='):
isVisible = Data[10:-1]
if isVisible == 'True':
isVisible = True
elif isVisible == 'False':
isVisible = False
if Data.startswith('kind.isCarryable='):
isCarryable = Data[17:-1]
if isCarryable == "True":
isCarryable = True
elif isCarryable == "False":
isCarryable = False
if Data.startswith('kind.respawns='):
respawns = Data[14:-1]
if respawns == "True":
respawns = True
elif respawns == "False":
respawns = False
if Data.startswith('kind.itemGrabHandler='):
itemGrabHandler = Data[21:-1]
if itemGrabHandler == "True":
itemGrabHandler = True
elif itemGrabHandler == "False":
itemGrabHandler = False
if Data.startswith('kind.itemGrabHandler.notDroppable='):
notDroppable = Data[34:-1]
if notDroppable == "True":
notDroppable = True
elif notDroppable == "False":
notDroppable = False
if Data.startswith('kind.objectSpawner='):
objectSpawner = Data[19:-1]
if objectSpawner == 'True':
objectSpawner = True
elif objectSpawner == 'False':
objectSpawner = False
if Data.startswith('kind.objectSpawner.time='):
time = int(Data[24:-1])
if Data.startswith('kind.objectSpawner.spawnOdds='):
text = Data[29:-1]
oddsList = text.split(',')
#print "oddsList:" + str(oddsList)
nestedOddsList = []
for odds in oddsList:
nestedOddsList.append(odds.split(':'))
for oddsEntry in nestedOddsList:
oddsEntry[1] = int(oddsEntry[1])
if oddsEntry[0] == 'True':
oddsEntry[0] = True
elif oddsEntry[0] == 'False':
oddsEntry[0] = False
#print nestedOddsList
spawnOdds = nestedOddsList
if Data.startswith('kind.objectSpawner.container='):
text = Data[29:-1]
if text == 'None':
container = None
else:
container = text[1:-1] # this should be a reference to another object
container = container.split(', ')
if Data.startswith('kind.objectSpawner.cycles='):
cycles = int(Data[26:-1])
if Data.startswith('kind.objectSpawner.repeat='):
text = Data[26:-1]
if text == 'True':
repeat = True
elif text == 'False':
repeat = False
if Data.startswith('kind.objectSpawner.active='):
text = Data[26:-1]
#print "***active:" + text
if text == 'True':
active = True
elif text == 'False':
active = False
if equipmentType == 'weapon':
newWeapon = World.weapon()
elif equipmentType == 'armor':
newArmor = World.armor()
if itemGrabHandler == True:
newItemGrabHandler = World.itemGrabHandler(notDroppable=notDroppable)
else:
newItemGrabHandler = None
if objectSpawner == True:
        newObjectSpawner = World.objectSpawner(owner=None, TIMERS=Globals.TIMERS, time=time, obj=None, oddsList=spawnOdds, container=container, cycles=cycles, repeat=repeat, active=active)
else:
newObjectSpawner = None
newEquipment = World.equipment(owner=None, weapon=newWeapon, armor=newArmor, slot=slot, durability=durability, maxDurability=maxDurability, worth=worth, hp=hp, pp=pp, offense=offense, defense=defense, speed=speed, guts=guts, luck=luck, vitality=vitality, IQ=IQ, battleCommands=battleCommands, statusEffect=statusEffect, onUse=onUse)
newItem = World.item(isCarryable=isCarryable, respawns=respawns, itemGrabHandler=newItemGrabHandler, objectSpawner=newObjectSpawner, equipment=newEquipment, onUse=onUse)
if newItem.itemGrabHandler:
newItem.itemGrabHandler.owner = newItem
if newItem.objectSpawner:
newItem.objectSpawner.owner = newItem
newEquipment.owner = newItem
newObject = World.Object(name=name, description=description, isVisible=isVisible, spawnContainer=spawnContainer, longDescription=longDescription, kind=newItem)
if newObject.kind.objectSpawner:
newObject.kind.objectSpawner.obj = newObject
newObject.ID = ID
newItem.owner = newObject
equipmentFromFile.append(newObject)
print "\n"
print "name:" + str(newObject.name)
print "description:" + str(newObject.description)
print "currentRoom:" + str(newObject.currentRoom)
print "isVisible:" + str(newObject.isVisible)
print "spawnContainer:" + str(newObject.spawnContainer)
print "longDescription:" + str(newObject.longDescription)
print "kind:" + str(newObject.kind)
#print "TIMERS:" + str(newObject.TIMERS)
if newObject.kind is not None:
print "kind.owner:" + str(newObject.kind.owner)
print "kind.equipment:" + str(newObject.kind.equipment)
print "kind.equipment.owner" + str(newObject.kind.equipment.owner)
if hasattr(newObject.kind.equipment, 'weapon'):
if newObject.kind.equipment.weapon is not None:
print "weapon:" + str(newObject.kind.equipment.weapon)
if hasattr(newObject.kind.equipment, 'armor'):
if newObject.kind.equipment.armor is not None:
print "armor:" + str(newObject.kind.equipment.armor)
print "slot:" + str(newObject.kind.equipment.slot)
print "durability:" + str(newObject.kind.equipment.durability)
print "maxDurability:" + str(newObject.kind.equipment.maxDurability)
print "worth:" + str(newObject.kind.equipment.worth)
if newObject.kind.equipment.hp != 0:
print "hp:" + str(newObject.kind.equipment.hp)
if newObject.kind.equipment.pp != 0:
print "pp:" + str(newObject.kind.equipment.pp)
if newObject.kind.equipment.offense != 0:
print "offense:" + str(newObject.kind.equipment.offense)
if newObject.kind.equipment.defense != 0:
print "defense:" + str(newObject.kind.equipment.defense)
if newObject.kind.equipment.speed != 0:
print "speed:" + str(newObject.kind.equipment.speed)
if newObject.kind.equipment.guts != 0:
print "guts:" + str(newObject.kind.equipment.guts)
if newObject.kind.equipment.luck != 0:
print "luck:" + str(newObject.kind.equipment.luck)
if newObject.kind.equipment.vitality != 0:
print "vitality:" + str(newObject.kind.equipment.vitality)
if newObject.kind.equipment.IQ != 0:
print "IQ:" + str(newObject.kind.equipment.IQ)
if newObject.kind.equipment.statusEffect is not None:
if newObject.kind.equipment.statusEffect != '':
print "statusEffect:" + str(newObject.kind.equipment.statusEffect)
if newObject.kind.equipment.battleCommands is not None:
if newObject.kind.equipment.battleCommands != ['']:
print "battleCommands:" + str(newObject.kind.equipment.battleCommands)
if newObject.kind.equipment.onUse is not None:
if newObject.kind.equipment.onUse != '':
print "onUse:" + str(newObject.kind.equipment.onUse)
if newObject.kind.itemGrabHandler is not None:
print "kind.itemGrabHandler:" + str(newObject.kind.itemGrabHandler)
print "kind.itemGrabHandler.notDroppable:" + str(newObject.kind.itemGrabHandler.notDroppable)
if newObject.kind.objectSpawner is not None:
print "kind.objectSpawner:" + str(newObject.kind.objectSpawner)
print "kind.objectSpawner.owner:" + str(newObject.kind.objectSpawner.owner)
print "kind.objectSpawner.TIMERS:" + str(newObject.kind.objectSpawner.TIMERS)
print "kind.objectSpawner.time:" + str(newObject.kind.objectSpawner.time)
print "kind.objectSpawner.obj:" + str(newObject.kind.objectSpawner.obj)
print "kind.objectSpawner.oddsList:" + str(newObject.kind.objectSpawner.oddsList)
print "kind.objectSpawner.container:" + str(newObject.kind.objectSpawner.container)
print "kind.objectSpanwer.cycles:" + str(newObject.kind.objectSpawner.cycles)
print "kind.objectSpawner.repeat:" + str(newObject.kind.objectSpawner.repeat)
print "kind.objectSpawner.active:" + str(newObject.kind.objectSpawner.active)
print "kind.objectSpawner.timer:" + str(newObject.kind.objectSpawner.timer)
print "kind.objectSpawner.startingLocation:" + str(newObject.kind.objectSpawner.startingLocation)
print "\n"
return newObject
for obj in fileList:
buildObjectFromFile(obj)
print savedEqFileList
if savedEqFileList == []:
for obj in eqFileList:
buildEquipmentFromFile(obj, 'blueprints/equip/')
else:
loadSavedEq()
|
apache-2.0
| 8,426,455,827,758,730,000
| 31.665399
| 333
| 0.702867
| false
| 2.862077
| false
| false
| false
|
bjornaa/roppy
|
roppy/averator.py
|
1
|
1728
|
# -*- coding: utf-8 -*-
"""Generator for moving averages from ROMS file(s)"""
import numpy as np
def roms_averator(ncid, var_name, L, grd):
"""Generator for moving averages from ROMS file(s)
var_name : text string, name of NetCDF variable
ncid : an open NetCDF Dataset or MFDataset
grd : a roppy.SGrid instance
L : integer, length of averaging period (only even presently)
n_rec = len(fid.dimensions['ocean_time']) # Number of time records
"""
# TODO: Make grd optional
    # Only use of grd is to work on a subdomain,
# alternatively: use subgrid specification
# make attribute grd.subgrid
    n_rec = len(ncid.dimensions['ocean_time'])  # number of time records
    N = L // 2
assert 2*N == L, "Only even averaging periods allowed (presently)"
# Dimension and staggering
if var_name == 'u': # 3D u-point
I, J = grd.Iu, grd.Ju
s = (slice(None), grd.Ju, grd.Iu)
elif var_name == 'v': # 3D v-point
I, J = grd.Iv, grd.Jv
s = (slice(None), grd.Jv, grd.Iv)
elif var_name == "ocean_time": # scalar
s = ()
else: # default = 3D rho-point
I, J = grd.I, grd.J
s = (slice(None), grd.J, grd.I)
# First average
    MF = ncid.variables[var_name][(0,) + s] / (4*N)
    for t in range(1, 2*N):
        MF += ncid.variables[var_name][(t,) + s] / (2*N)
    MF += ncid.variables[var_name][(2*N,) + s] / (4*N)
yield MF
# Update the average
for t in range(N+1, n_rec - N):
        MF += ncid.variables[var_name][(t+N,) + s] / (4*N)
        MF += ncid.variables[var_name][(t+N-1,) + s] / (4*N)
        MF -= ncid.variables[var_name][(t-N,) + s] / (4*N)
        MF -= ncid.variables[var_name][(t-N-1,) + s] / (4*N)
yield MF
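# Illustrative usage sketch (added; not part of the original module). The file
# name, the variable name "temp", and the SGrid import path are assumptions:
#
#   from netCDF4 import Dataset
#   from roppy import SGrid
#
#   nc = Dataset("ocean_avg.nc")
#   grd = SGrid(nc)
#   for mean_field in roms_averator(nc, "temp", L=4, grd=grd):
#       print(mean_field.shape)   # one running mean per time step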
|
mit
| 2,815,143,418,121,291,300
| 29.315789
| 73
| 0.545718
| false
| 2.860927
| false
| false
| false
|
PrincetonML/AND4NMF
|
code/compute_error.py
|
1
|
1397
|
import numpy as np
from numpy.linalg import norm
def compute_error(A_in, Ag_in):
A = A_in
Ag = Ag_in
    # realign the columns of A with the closest columns of Ag
D = A.shape[1]
inner = np.zeros((D, D))
for i in range(D):
for j in range(D):
inner[i, j] = np.asscalar(A[:, i].transpose() * Ag[:, j] )/(norm(A[:, i]) * norm(Ag[:, j]))
max = np.argmax(inner, axis = 0)
P = np.asmatrix(np.zeros((D, D)))
for i in range(D):
P[i, max[i]] = 1
# print "normalize the rows of A and A^*"
inv_norm_A = np.asarray(1.0 / np.apply_along_axis(norm, 0, A))
A = A * np.diag(inv_norm_A)
inv_norm_Ag = np.asarray(1.0 / np.apply_along_axis(norm, 0, Ag))
Ag = Ag * np.diag(inv_norm_Ag)
u = np.asmatrix(np.ones((1, D)))
#for each A_i^* we try to find the A_i that is closest to A_i^*
error = 0
for i in range(D):
Ag_i = Ag[:, i]
inner_product = np.asmatrix(Ag_i.transpose() * A)
norm_A = np.asmatrix(np.diag(A.transpose() * A))
z = np.divide(inner_product, norm_A).transpose()
z = np.asarray(z).flatten().transpose()
scalar = np.diag(z)
As = A * scalar
diff = np.apply_along_axis(norm, 0, As - Ag_i * u)
# min_idx = np.argmin(diff)
# print 'for Ag_%d: A_%d' % (i, min_idx)
difmin = np.amin(diff)
difmin = difmin * difmin
error = error + difmin
return error
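# Minimal self-check sketch (added; not part of the original file). It compares
# a random dictionary Ag against a slightly perturbed copy A, so the returned
# error should be small; the matrix sizes and noise level are arbitrary.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    Ag = np.asmatrix(rng.randn(20, 5))
    A = Ag + 0.01 * np.asmatrix(rng.randn(20, 5))
    print(compute_error(A, Ag))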
|
mit
| -702,603,614,609,093,800
| 30.75
| 103
| 0.531854
| false
| 2.755424
| false
| false
| false
|
Tong-Chen/scikit-learn
|
sklearn/ensemble/gradient_boosting.py
|
1
|
44936
|
"""Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from warnings import warn
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, array2d, check_arrays, column_or_1d
from ..utils.extmath import logsumexp
from ..utils.fixes import unique
from ..externals import six
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import MSE, PresortBestSplitter
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0)")
self.alpha = alpha
def fit(self, X, y):
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y):
self.mean = np.mean(y)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
def fit(self, X, y):
n_pos = np.sum(y)
n_neg = y.shape[0] - n_pos
if n_neg == 0 or n_pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = np.log(n_pos / n_neg)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y):
class_counts = np.bincount(y)
self.priors = class_counts / float(y.shape[0])
def predict(self, X):
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self, X, y):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
        ----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_mask, learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : np.ndarray, shape=(n, m)
The data array.
y : np.ndarray, shape=(n,)
The target labels.
residual : np.ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k])
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression")
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
    Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred):
return np.mean((y - pred.ravel()) ** 2.0)
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_mask, learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred):
return np.abs(y - pred.ravel()).mean()
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
tree.value[leaf, 0, 0] = np.median(y.take(terminal_region, axis=0) -
pred.take(terminal_region, axis=0))
class HuberLossFunction(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
return (sq_loss + lin_loss) / y.shape[0]
def negative_gradient(self, y, pred, **kargs):
pred = pred.ravel()
diff = y - pred
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = np.median(diff)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression allows estimating the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
return (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
val = stats.scoreatpercentile(diff, self.percentile)
tree.value[leaf, 0] = val
class BinomialDeviance(LossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - 1.0 / (1.0 + np.exp(-pred.ravel()))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(y - prob) / sum(prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
numerator = residual.sum()
denominator = np.sum((y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
class MultinomialDeviance(LossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
numerator = residual.sum()
numerator *= (self.K - 1) / self.K
denominator = np.sum((y - residual) * (1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'bdeviance': BinomialDeviance,
'mdeviance': MultinomialDeviance,
'deviance': None} # for both, multinomial and binomial
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
max_features=self.max_features,
random_state=random_state)
sample_weight = None
if self.subsample < 1.0:
sample_weight = sample_mask.astype(np.float64)
tree.fit(X, residual,
sample_weight=sample_weight, check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_mask, self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0")
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0")
if (self.loss not in self._SUPPORTED_LOSS or
self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss in ('mdeviance', 'bdeviance'):
warn(("Loss '{0:s}' is deprecated as of version 0.14. "
"Use 'deviance' instead. ").format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if self.subsample <= 0.0 or self.subsample > 1:
raise ValueError("subsample must be in (0,1]")
if self.init is not None:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init must be valid estimator")
self.init_ = self.init
else:
self.init_ = self.loss_.init_estimator()
if not (0.0 < self.alpha and self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0)")
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
                if self.n_classes_ > 1:  # classification
                    max_features = max(1, int(np.sqrt(self.n_features)))
                else:  # regression
                    max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
max_features = int(self.max_features * self.n_features)
self.max_features_ = max_features
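        # Added note (illustrative): with n_features == 100 the branches above
        # give "sqrt" (and "auto" for classification) -> 10, "log2" -> 6,
        # a float of 0.5 -> 50, None -> 100, and an int is used as given.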
def fit(self, X, y):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``
Returns
-------
self : object
Returns self.
"""
# Check input
X, = check_arrays(X, dtype=DTYPE, sparse_format="dense",
check_ccontiguous=True)
y = column_or_1d(y, warn=True)
n_samples, n_features = X.shape
self.n_features = n_features
random_state = check_random_state(self.random_state)
# Check parameters
self._check_params()
# pull freq used parameters into local scope
subsample = self.subsample
loss_ = self.loss_
do_oob = subsample < 1.0
# allocate model state data structures
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
if do_oob:
self._oob_score_ = np.zeros((self.n_estimators), dtype=np.float64)
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
sample_mask = np.ones((n_samples,), dtype=np.bool)
n_inbag = max(1, int(subsample * n_samples))
if self.verbose:
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
if do_oob:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
verbose_fmt = ' '.join(verbose_fmt)
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
# plot verbose info each time i % verbose_mod == 0
verbose_mod = 1
start_time = time()
# fit initial model
self.init_.fit(X, y)
# init predictions
y_pred = self.init_.predict(X)
# init criterion and splitter
criterion = MSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
random_state)
# perform boosting iterations
for i in range(self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_mask,
criterion, splitter, random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask])
self._oob_score_[i] = loss_(y[~sample_mask],
y_pred[~sample_mask])
self.oob_improvement_[i] = old_oob_score - self._oob_score_[i]
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = self.loss_(y, y_pred)
if self.verbose > 0:
if (i + 1) % verbose_mod == 0:
oob_impr = self.oob_improvement_[i] if do_oob else 0
remaining_time = ((self.n_estimators - (i + 1)) *
(time() - start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(verbose_fmt.format(iter=i + 1,
train_score=self.train_score_[i],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
verbose_mod *= 10
return self
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, call `fit` "
"before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. Classes are
ordered by arithmetical order. Regression and binary
classification are special cases with ``k == 1``,
otherwise ``k==n_classes``.
"""
X = array2d(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. Classes are
ordered by arithmetical order. Regression and binary
classification are special cases with ``k == 1``,
otherwise ``k==n_classes``.
"""
X = array2d(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.n_estimators):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
@property
def oob_score_(self):
warn("The oob_score_ argument is replaced by oob_improvement_"
" as of version 0.14 and will be removed in 0.16.",
DeprecationWarning)
try:
return self._oob_score_
except AttributeError:
raise ValueError("Estimator not fitted, "
"call `fit` before `oob_score_`.")
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Parameters
----------
loss : {'deviance'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
    max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency).
If greater than 1 then it prints progress and performance for every tree.
Attributes
----------
`feature_importances_` : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_improvement_` : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
`oob_score_` : array, shape = [n_estimators]
Score of the training dataset obtained using an out-of-bag estimate.
The i-th score ``oob_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the out-of-bag sample.
Deprecated: use `oob_improvement_` instead.
`train_score_` : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
`loss_` : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
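
    Examples
    --------
    Illustrative usage sketch (added; not part of the original docstring); the
    dataset helper below is only an assumed typical input.

    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_samples=100, random_state=0)
    >>> clf = GradientBoostingClassifier(n_estimators=50, random_state=0)
    >>> clf.fit(X, y).predict(X[:2]).shape
    (2,)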
"""
_SUPPORTED_LOSS = ('deviance', 'mdeviance', 'bdeviance')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2, min_samples_leaf=1,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0):
super(GradientBoostingClassifier, self).__init__(
loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, max_depth, init, subsample, max_features,
random_state, verbose=verbose)
def fit(self, X, y):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``
Returns
-------
self : object
Returns self.
"""
y = column_or_1d(y, warn=True)
self.classes_, y = unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return super(GradientBoostingClassifier, self).fit(X, y)
def _score_to_proba(self, score):
"""Compute class probability estimates from decision scores. """
proba = np.ones((score.shape[0], self.n_classes_), dtype=np.float64)
if not self.loss_.is_multi_class:
proba[:, 1] = 1.0 / (1.0 + np.exp(-score.ravel()))
proba[:, 0] -= proba[:, 1]
else:
proba = np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
return proba
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. Classes are
ordered by arithmetical order.
"""
score = self.decision_function(X)
return self._score_to_proba(score)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples at each stage.
"""
for score in self.staged_decision_function(X):
yield self._score_to_proba(score)
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
proba = self.predict_proba(X)
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
def staged_predict(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array, shape = [n_samples]
            The predicted classes of the input samples at each stage.
"""
for proba in self.staged_predict_proba(X):
yield self.classes_.take(np.argmax(proba, axis=1), axis=0)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency).
If greater than 1 then it prints progress and performance for every tree.
Attributes
----------
`feature_importances_` : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_improvement_` : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
`oob_score_` : array, shape = [n_estimators]
Score of the training dataset obtained using an out-of-bag estimate.
The i-th score ``oob_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the out-of-bag sample.
Deprecated: use `oob_improvement_` instead.
`train_score_` : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
`loss_` : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
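
    Examples
    --------
    Illustrative usage sketch (added; not part of the original docstring).

    >>> import numpy as np
    >>> X = np.random.RandomState(0).rand(100, 4)
    >>> y = X[:, 0] + 2.0 * X[:, 1]
    >>> est = GradientBoostingRegressor(n_estimators=50, random_state=0)
    >>> est.fit(X, y).predict(X[:3]).shape
    (3,)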
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2, min_samples_leaf=1,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0):
super(GradientBoostingRegressor, self).__init__(
loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, max_depth, init, subsample, max_features,
random_state, alpha, verbose)
def fit(self, X, y):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``
Returns
-------
self : object
Returns self.
"""
self.n_classes_ = 1
return super(GradientBoostingRegressor, self).fit(X, y)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y: array of shape = [n_samples]
The predicted values.
"""
return self.decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
        This method allows monitoring (i.e. determining the error on a
        test set) after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self.staged_decision_function(X):
yield y.ravel()
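# A minimal usage sketch of GradientBoostingRegressor on synthetic data.
# The shapes and hyper-parameter values below are illustrative only and
# simply match the constructor signature defined above; the block is guarded
# so it never runs on import.
if __name__ == "__main__":
    import numpy as _np
    _rng = _np.random.RandomState(0)
    _X = _rng.rand(100, 4)                      # 100 samples, 4 features
    _y = 2.0 * _X[:, 0] + _rng.normal(scale=0.1, size=100)
    _est = GradientBoostingRegressor(n_estimators=50, learning_rate=0.1,
                                     max_depth=2, subsample=0.8)
    _est.fit(_X, _y)
    _preds = _est.predict(_X)                   # array of shape [100]
    _staged = list(_est.staged_predict(_X))     # one prediction array per stage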
|
bsd-3-clause
| 8,973,508,359,669,950,000
| 36.260365
| 81
| 0.582228
| false
| 4.10337
| false
| false
| false
|
linuxdeepin/deepin-ui
|
dtk/ui/tooltip.py
|
1
|
22624
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Xia Bin
#
# Author: Xia Bin <xiabin@linuxdeepin.com>
# Maintainer: Xia Bin <xiabin@linuxdeepin.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from animation import Animation, LinerInterpolator
from gtk import gdk
from label import Label
from theme import ui_theme
from utils import propagate_expose, color_hex_to_cairo, cairo_disable_antialias
import cairo
import gobject
import gtk
__all__ = ["text", "custom", "show_tooltip", "show_delay", "hide_delay", "hide_duration",
"background", "padding", "show_now", "has_shadow", "disable", "always_update",
"disable_all"]
class ChildLocation:
def __init__(self):
self.x = 0
self.y = 0
self.child = None
self.container = None
def window_to_alloc(widget, x, y):
if widget.get_has_window() and widget.parent:
(wx, wy) = widget.window.get_position()
x += wx - widget.allocation.x
y += wy - widget.allocation.y
else:
x -= widget.allocation.x
y -= widget.allocation.y
return (x, y)
def child_location_foreach(widget, cl): #cl = child_location
if not widget.is_drawable():
return
if widget.get_realized() and not cl.child :
        #TODO: the tuple unpacking below may be invalid in some cases.
(x, y) = cl.container.translate_coordinates(widget, int(cl.x), int(cl.y))
if x >= 0 and x < widget.allocation.width and \
y >=0 and y < widget.allocation.height:
if isinstance(widget, gtk.Container):
tmp = ChildLocation()
(tmp.x, tmp.y, tmp.container) = (x, y, widget)
widget.forall(child_location_foreach, tmp)
if tmp.child:
cl.child = tmp.child
else:
cl.child = widget
else:
cl.child = widget
def coords_to_parent(window, x, y):
if window.get_window_type() == gdk.WINDOW_OFFSCREEN:
(px, py) = (-1, -1)
window.emit("to-embedder", window, x, y, px, py)
return (px, py)
else:
p = window.get_position()
return (x + p[0], y + p[1])
def find_at_coords(gdkwindow, window_x, window_y):
cl = ChildLocation()
try:
widget = gdkwindow.get_user_data()
except:
return (None, cl.x, cl.y)
cl.x = window_x
cl.y = window_y
while gdkwindow and gdkwindow != widget.window:
(cl.x, cl.y) = coords_to_parent(gdkwindow, cl.x, cl.y)
gdkwindow = gdkwindow.get_effective_parent()
if not gdkwindow:
return (None, cl.x, cl.y)
(cl.x, cl.y) = window_to_alloc(widget, cl.x, cl.y)
#find child
if isinstance(widget, gtk.Container):
cl.container = widget
cl.child = None
tmp_widget = widget
widget.forall(child_location_foreach, cl)
if cl.child and WidgetInfo.get_info(cl.child):
widget = cl.child
elif cl.container and WidgetInfo.get_info(cl.container):
widget = cl.container
(cl.x, cl.y) = tmp_widget.translate_coordinates(widget, int(cl.x), int(cl.y))
if WidgetInfo.get_info(widget):
return (widget, cl.x, cl.y)
p = widget.get_parent()
while p:
if WidgetInfo.get_info(p):
return (p, cl.x, cl.y)
else:
p = p.get_parent()
return (None, cl.x, cl.y)
def update_tooltip():
'''
    This function is invoked for every gdk event received,
    so keep the work done here as small as possible.
'''
if TooltipInfo.enable_count == 0:
return
try :
(window, x, y) = display.get_window_at_pointer()
except:
return True
(widget, tx, ty) = find_at_coords(window, x, y)
if widget == None:
pass
# print "nop"
if not widget \
or tx < 0 or tx >= widget.allocation.width \
or ty < 0 or ty >= widget.allocation.height:
hide_tooltip()
return True
if TooltipInfo.widget != widget:
TooltipInfo.prewidget = widget
TooltipInfo.winfo = WidgetInfo.get_info(widget)
TooltipInfo.show_delay = TooltipInfo.winfo.show_delay
TooltipInfo.tmpwidget = widget
(rx, ry) = window.get_origin()
if TooltipInfo.pos_info != (int(rx+x), int(ry+y)) and TooltipInfo.show_id != 0:
hide_tooltip()
if TooltipInfo.show_id == 0:
if TooltipInfo.in_quickshow:
show_delay = 300
else:
show_delay = TooltipInfo.winfo.show_delay
TooltipInfo.pos_info = (int(rx+x), int(ry+y))
TooltipInfo.show_id = gobject.timeout_add(show_delay, lambda : show_tooltip(*TooltipInfo.pos_info))
def show_now():
try :
(window, x, y) = display.get_window_at_pointer()
except:
return True
(widget, tx, ty) = find_at_coords(window, x, y)
if widget == None:
pass
if not widget \
or tx < 0 or tx >= widget.allocation.width \
or ty < 0 or ty >= widget.allocation.height:
hide_tooltip()
return True
if TooltipInfo.widget != widget:
TooltipInfo.prewidget = widget
TooltipInfo.winfo = WidgetInfo.get_info(widget)
TooltipInfo.show_delay = TooltipInfo.winfo.show_delay
TooltipInfo.tmpwidget = widget
(rx, ry) = window.get_origin()
if TooltipInfo.pos_info != (int(rx+x), int(ry+y)) and TooltipInfo.show_id != 0:
hide_tooltip()
show_tooltip(int(rx+x), int(ry+y))
class TooltipInfo:
widget = None
tmpwidget = None
prewidget = None
pos_info = None
window = None
alignment = None
winfo = None
offset_x = 5
offset_y = 5
on_showing = False
need_update = True
#displays = []
stamp = 0
enable_count = 0
show_id = 0
in_quickshow = False
quickshow_id = 0
quickshow_delay = 2500
def generate_tooltip_content():
''' generate child widget and update the TooltipInfo'''
if TooltipInfo.widget == TooltipInfo.prewidget and TooltipInfo.alignment.child and not TooltipInfo.need_update:
return
TooltipInfo.widget = TooltipInfo.tmpwidget
TooltipInfo.winfo = WidgetInfo.get_info(TooltipInfo.widget)
winfo = TooltipInfo.winfo
pre_child = TooltipInfo.alignment.child
if pre_child and winfo == WidgetInfo.get_info(pre_child) and not TooltipInfo.need_update:
return
if winfo.custom:
child = winfo.custom(*winfo.custom_args, **winfo.custom_kargs)
elif winfo.text:
child = Label(winfo.text, *winfo.text_args, **winfo.text_kargs)
else:
raise Warning, "tooltip enable's widget must has text or custom property"
if pre_child:
TooltipInfo.alignment.remove(pre_child)
pre_child.destroy()
TooltipInfo.alignment.set_padding(winfo.padding_t, winfo.padding_l, winfo.padding_b, winfo.padding_r)
TooltipInfo.alignment.add(child)
TooltipInfo.alignment.show_all()
allocation = gtk.gdk.Rectangle(0, 0, *TooltipInfo.alignment.child.size_request())
allocation.width += winfo.padding_l + winfo.padding_r
allocation.height += winfo.padding_t + winfo.padding_b
TooltipInfo.window.size_allocate(allocation)
TooltipInfo.window.modify_bg(gtk.STATE_NORMAL, winfo.background)
if winfo.always_update:
TooltipInfo.need_update = True
else:
TooltipInfo.need_update = False
def enable_quickshow():
def disable_q():
TooltipInfo.in_quickshow = False
if TooltipInfo.quickshow_id != 0:
gobject.source_remove(TooltipInfo.quickshow_id)
TooltipInfo.in_quickshow = True
if TooltipInfo.quickshow_id == 0:
TooltipInfo.quickshow_id = gobject.timeout_add(TooltipInfo.quickshow_delay, disable_q)
else:
gobject.source_remove(TooltipInfo.quickshow_id)
TooltipInfo.quickshow_id = gobject.timeout_add(TooltipInfo.quickshow_delay, disable_q)
def hide_tooltip():
TooltipInfo.window.hide()
TooltipInfo.on_showing = False
if TooltipInfo.show_id != 0:
gobject.source_remove(TooltipInfo.show_id)
TooltipInfo.show_id = 0
if TooltipInfo.window.get_realized():
TooltipInfo.window.animation.stop()
return False
def show_tooltip(x, y):
if TooltipInfo.enable_count == 0 or not TooltipInfo.winfo.enable:
return
generate_tooltip_content()
enable_quickshow()
#What will happen if the content widget is very big?
#----------------------------------------------
(p_w, p_h) = (10, 10) #TODO: pointer size ?
(w, h) = TooltipInfo.window.get_root_window().get_size()
(t_w, t_h) = TooltipInfo.window.size_request()
if x + p_w + t_w > w:
POS_H = 0 #left
else:
POS_H = 1 #right
if y + p_h + t_h > h:
POS_V = 2 #top
else:
        POS_V = 4 #bottom
p = POS_H + POS_V
######################################
# LEFT(0) RIGHT(1) #
#------------------------------------#
#TOP(2) 2 3 #
#------------------------------------#
#BOTTOM(4) 4 5 #
######################################
if p == 2:
TooltipInfo.window.move(x - t_w, y - t_h)
elif p == 3:
TooltipInfo.window.move(x, y - t_h)
elif p == 4:
TooltipInfo.window.move(x - t_w, y)
elif p == 5:
TooltipInfo.window.move(x + p_w, y + p_h)
else:
        assert False, "This shouldn't appear!"
#------------------------------------------
TooltipInfo.window.show()
TooltipInfo.on_showing = True
def __init_window():
def on_realize(win):
win.swindow = gtk.gdk.Window(win.get_parent_window(),
width=0, height=0,
window_type=gtk.gdk.WINDOW_TEMP,
wclass=gtk.gdk.INPUT_OUTPUT,
event_mask=(win.get_events() | gdk.EXPOSURE_MASK),
visual=win.get_visual(),
colormap=win.get_colormap(),
)
win.swindow.set_user_data(win)
        #TODO: set duration dynamically
win.animation = Animation([win.window, win.swindow], gdk.Window.set_opacity, 1000, [0, 1],
lambda *args: 1 - LinerInterpolator(*args))
def on_map(win):
winfo = TooltipInfo.winfo
win.animation.init(1)
win.animation.start_after(winfo.hide_delay)
geo = win.window.get_geometry()
win.swindow.move_resize(geo[0]+TooltipInfo.offset_x, geo[1]+TooltipInfo.offset_y,
win.allocation.width, win.allocation.height)
win.swindow.show()
def on_expose_event(win, e):
cr = win.swindow.cairo_create()
cr.set_source_rgba(1, 1, 1, 0)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
winfo = TooltipInfo.winfo
if winfo.has_shadow:
(x, y, width, height) = (0, 0, win.allocation.width, win.allocation.height)
(o_x, o_y) = (5, 5)
#right-bottom corner
radial = cairo.RadialGradient(width - o_x, height-o_y, 1, width -o_x, height-o_y, o_x)
radial.add_color_stop_rgba(0.0, 0,0,0, 0.3)
radial.add_color_stop_rgba(0.6, 0,0,0, 0.1)
radial.add_color_stop_rgba(1, 0,0,0, 0)
cr.set_source(radial)
cr.rectangle(width-o_x, height-o_y, o_x, o_y)
cr.fill()
#left-bottom corner
radial = cairo.RadialGradient(o_x, height-o_y, 1, o_x, height-o_y, o_x)
radial.add_color_stop_rgba(0.0, 0,0,0, 0.3)
radial.add_color_stop_rgba(0.6, 0,0,0, 0.1)
radial.add_color_stop_rgba(1, 0,0,0, 0)
cr.set_source(radial)
cr.rectangle(0, height-o_y, o_x, o_y)
cr.fill()
#left-top corner
radial = cairo.RadialGradient(width-o_x, o_y, 1, width-o_x, o_y, o_x)
radial.add_color_stop_rgba(0.0, 0,0,0, 0.3)
radial.add_color_stop_rgba(0.6, 0,0,0, 0.1)
radial.add_color_stop_rgba(1, 0,0,0, 0)
cr.set_source(radial)
cr.rectangle(width-o_x, 0, o_x, o_y)
cr.fill()
vradial = cairo.LinearGradient(0, height-o_y, 0, height)
vradial.add_color_stop_rgba(0.0, 0,0,0, .5)
vradial.add_color_stop_rgba(0.4, 0,0,0, 0.25)
vradial.add_color_stop_rgba(1, 0,0,0, 0.0)
cr.set_source(vradial)
cr.rectangle(o_x, height-o_x, width-2*o_x, height)
cr.fill()
hradial = cairo.LinearGradient(width-o_x, 0, width, 0)
hradial.add_color_stop_rgba(0.0, 0,0,0, .5)
hradial.add_color_stop_rgba(0.4, 0,0,0, 0.25)
hradial.add_color_stop_rgba(1, 0,0,0, 0.0)
cr.set_source(hradial)
cr.rectangle(width-o_x, o_y, width, height-2*o_y)
cr.fill()
gtk.Alignment.do_expose_event(TooltipInfo.alignment, e)
propagate_expose(win, e)
return True
def on_unmap(win):
win.swindow.hide()
def on_expose_alignment(widget, event):
'''Expose tooltip label.'''
rect = widget.allocation
cr = widget.window.cairo_create()
with cairo_disable_antialias(cr):
cr.set_line_width(1)
cr.set_source_rgba(*color_hex_to_cairo(ui_theme.get_color("tooltip_frame").get_color()))
cr.rectangle(rect.x + 1, rect.y + 1, rect.width - 1, rect.height - 1)
cr.stroke()
return True
TooltipInfo.window = gtk.Window(gtk.WINDOW_POPUP)
TooltipInfo.window.set_colormap(gtk.gdk.Screen().get_rgba_colormap())
TooltipInfo.alignment = gtk.Alignment()
TooltipInfo.window.add(TooltipInfo.alignment)
TooltipInfo.window.connect('realize', on_realize)
TooltipInfo.window.connect('map', on_map)
TooltipInfo.window.connect('unmap', on_unmap)
TooltipInfo.window.connect('expose-event', on_expose_event)
TooltipInfo.alignment.connect('expose-event', on_expose_alignment)
__init_window()
#TODO:detect display?
#FIXME:
display = None
def init_widget(widget):
TooltipInfo.enable_count += 1
w_info = WidgetInfo()
WidgetInfo.set_info(widget, w_info)
if widget.get_has_window():
widget.add_events(gdk.POINTER_MOTION_MASK|gdk.POINTER_MOTION_HINT_MASK)
else:
widget.connect('realize',
lambda w: w.window.set_events(w.window.get_events() | gdk.POINTER_MOTION_HINT_MASK | gdk.POINTER_MOTION_MASK))
if not display:
init_tooltip(widget)
return w_info
def init_tooltip(win):
global display
if not display:
display = win.get_display()
#gobject.timeout_add(100, lambda : update_tooltip(display))
#win.connect('focus-out-event', lambda w, e: hide_tooltip(True))
win.connect('leave-notify-event', lambda w, e: hide_tooltip())
#
#the Interface of dtk Tooltip, the core is the WidgetInfo's attribute
#
class WidgetInfo(object):
__DATA_NAME = "_deepin_tooltip_info"
@staticmethod
def get_info(widget):
return widget.get_data(WidgetInfo.__DATA_NAME)
@staticmethod
def set_info(widget, info):
return widget.set_data(WidgetInfo.__DATA_NAME, info)
def __init__(self):
object.__setattr__(self, "show_delay", 1000)
object.__setattr__(self, "hide_delay", 3000)
object.__setattr__(self, "hide_duration", 1000)
object.__setattr__(self, "text", None)
object.__setattr__(self, "text_args", None)
object.__setattr__(self, "text_kargs", None)
object.__setattr__(self, "custom", None)
object.__setattr__(self, "custom_args", None)
object.__setattr__(self, "custom_kargs", None)
object.__setattr__(self, "background", gtk.gdk.Color(ui_theme.get_color("tooltip_background").get_color()))
object.__setattr__(self, "padding_t", 5)
object.__setattr__(self, "padding_b", 5)
object.__setattr__(self, "padding_l", 5)
object.__setattr__(self, "padding_r", 5)
object.__setattr__(self, "has_shadow", True)
object.__setattr__(self, "enable", False) #don't modify the "enable" init value
object.__setattr__(self, "always_update", False)
def __setattr__(self, key, value):
if hasattr(self, key):
object.__setattr__(self, key, value)
else:
raise Warning, "Tooltip didn't support the \"%s\" property" % key
TooltipInfo.need_update = True
if key == "text" or key == "custom":
self.enable = True
all_method = {}
def chainmethod(func):
all_method[func.__name__] = func
def wrap(*args, **kargs):
return func(*args, **kargs)
wrap.__dict__ = all_method
return wrap
#
#you can write your own wrap function using "set_value" (see the sketch right after set_value below) or directly modify the WidgetInfo attributes
#
@chainmethod
def set_value(widgets, kv):
if not isinstance(widgets, list):
widgets = [widgets]
for w in widgets:
w_info = WidgetInfo.get_info(w)
if not w_info:
w_info = init_widget(w)
for k in kv:
setattr(w_info, k, kv[k])
return set_value
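# A small illustrative sketch of such a custom wrapper built on set_value(),
# following the note above. It is not part of the original API; the name,
# the bundled properties and the 300 ms delay are arbitrary choices.
def example_quick_text(widget, content, delay=300):
    set_value(widget, {"text": content, "show_delay": delay, "has_shadow": False})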
#------------------the default wrap function ---------------------------------------
@chainmethod
def text(widget, content, *args, **kargs):
'''
    Set the tooltip's text content.
    "content", *args and **kargs are passed on to dtk.ui.Label,
    so you can change the text's color and other properties.
    @param widget: the widget you want to change.
    @param content: the text you want to show.
    @param args: passed to dtk.ui.Label
    @param kargs: passed to dtk.ui.Label
'''
set_value(widget, {
"text": content,
"text_args":args,
"text_kargs":kargs
})
return text
@chainmethod
def custom(widget, cb, *args, **kargs):
'''
Set the custom tooltip content.
    @param widget: the widget you want to change.
    @param cb: the function used to generate the content widget; it should
    return a gtk.Widget. Be careful: if the generated content depends on other
    runtime factors, you should also use "always_update"
    to disable the internal cache mechanism.
    @param args: passed to cb
    @param kargs: passed to cb
'''
set_value(widget, {
"custom" : cb,
"custom_args" : args,
"custom_kargs" : kargs
})
return custom
@chainmethod
def show_delay(widget, delay):
'''
    Set how long the pointer must stay over the widget before the tooltip is shown.
    @param widget: the widget you want to change.
    @param delay: the delay (in milliseconds) before the tooltip is shown.
'''
delay = max(250, delay)
set_value(widget, {"show_delay": delay})
return show_delay
@chainmethod
def hide_delay(widget, delay):
'''
    Set how long the tooltip stays visible before it starts to hide.
    @param widget: the widget you want to change.
    @param delay: the delay (in milliseconds) before the tooltip starts to hide.
'''
set_value(widget, {"hide_delay": delay})
return hide_delay
@chainmethod
def hide_duration(widget, delay):
'''
    Set the duration of the tooltip's hide (fade-out) effect.
    @param widget: the widget you want to change.
    @param delay: the duration (in milliseconds) of the effect.
'''
set_value(widget, {"hide_duration": delay})
return hide_duration
@chainmethod
def background(widget, color):
'''
    Set the background of the tooltip's content.
    @param widget: the widget you want to change.
    @param color: the gdk.Color to use as the background.
'''
set_value(widget, {"background": color})
return background
@chainmethod
def padding(widget, t, l, b, r):
'''
set the padding of the tooltip's content.
    @param widget: the widget you want to change.
@param t: the top space
@param l: the left space
@param b: the bottom space
@param r: the right space
'''
kv = {}
if t >= 0:
kv["padding_t"] = int(t)
if b >= 0:
kv["padding_b"] = int(b)
if l >= 0:
kv["padding_l"] = int(l)
if r >= 0:
kv["padding_r"] = int(r)
set_value(widget, kv)
return padding
@chainmethod
def has_shadow(widget, need):
'''
    Set whether this widget's tooltip needs a shadow.
    @param widget: the widget you want to change.
    @param need: whether a shadow is needed.
'''
set_value(widget, {"has_shadow": need})
return has_shadow
@chainmethod
def disable(widget, is_disable):
'''
    Disable this widget's tooltip.
    @param widget: the widget whose tooltip you want to disable.
    @param is_disable: whether to disable the tooltip.
'''
winfo = WidgetInfo.get_info(widget)
if is_disable:
if winfo and winfo.enable:
winfo.enable = False
TooltipInfo.enable_count -= 1
else:
if winfo and not winfo.enable:
winfo.enable = True
TooltipInfo.enable_count += 1
return disable
@chainmethod
def always_update(widget, need):
'''
    Always regenerate the tooltip's content. Use this when the custom
    content is generated by a function whose returned widget is
    different on every invocation.
    @param widget: Gtk.Widget instance.
    @param need: whether to always update.
'''
set_value(widget, {"always_update" : need})
return always_update
#------------------------this is global effect function---------------------
def disable_all(is_disable):
    '''
    Disable (or re-enable) all tooltips at once, regardless of per-widget settings.
    '''
count = TooltipInfo.enable_count
if is_disable:
if count > 0:
TooltipInfo.enable_count = -count
else:
if count < 0:
TooltipInfo.enable_count = -count
def tooltip_handler(event):
gtk.main_do_event(event)
if event.type == gdk.MOTION_NOTIFY:
# print "leave", time.time()
update_tooltip()
elif event.type == gdk.LEAVE_NOTIFY:
# print "leave", time.time()
hide_tooltip()
gdk.event_handler_set(tooltip_handler)
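# Minimal usage sketch for this module (illustrative only). It assumes a
# running gtk main loop; "button", "panel" and "make_content_widget" are
# placeholders, not names defined here. Kept as comments so nothing runs at
# import time.
#     from dtk.ui import tooltip as Tooltip
#     Tooltip.text(button, "Click to save")       # plain text tooltip
#     Tooltip.show_delay(button, 500)             # wait 500 ms before showing
#     Tooltip.custom(panel, make_content_widget)  # make_content_widget() returns a gtk.Widget
#     Tooltip.disable(button, True)               # turn the tooltip off again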
|
gpl-3.0
| -150,503,247,418,591,170
| 30.730715
| 230
| 0.594457
| false
| 3.41649
| false
| false
| false
|
jackjlynch/github-activity-mirror
|
gh_copy.py
|
1
|
1333
|
from lxml import html
import requests
import argparse
from datetime import datetime
from git import Repo, Actor
def main():
parser = argparse.ArgumentParser(description='Copy a user\'s Github commit activity')
parser.add_argument('user')
parser.add_argument('repo_dir')
parser.add_argument('name')
parser.add_argument('email')
args = parser.parse_args()
page = requests.get('http://github.com/' + args.user)
tree = html.fromstring(page.content)
days = tree.xpath('//*[@id="contributions-calendar"]/div[1]/svg/g/g/rect')
contribs = {}
for day in days:
date = datetime.strptime(day.get('data-date'), '%Y-%m-%d')
contribs[date] = int(day.get('data-count'))
repo = Repo(args.repo_dir)
assert not repo.bare
start_date = datetime.fromtimestamp(0)
#making some dangerous assumptions here
if len(repo.heads) > 0:
start_date = datetime.fromtimestamp(repo.heads.master.commit.authored_date)
index = repo.index
author = Actor(args.name, args.email)
for date in contribs:
for i in range(contribs[date]):
if date > start_date:
commit = index.commit('', author=author, committer=author, author_date=date.isoformat())
assert commit.type == 'commit'
if __name__ == '__main__':
main()
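# Example invocation (hypothetical values) matching the argparse definition
# above -- positional arguments: user, repo_dir, name, email:
#     python gh_copy.py octocat /path/to/local/repo "Jane Doe" jane@example.com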
|
gpl-3.0
| 1,612,143,610,113,723,100
| 30
| 104
| 0.642911
| false
| 3.652055
| false
| false
| false
|
AISystena/web_crawler
|
lib/image_cnn/ImagePredictor.py
|
1
|
1517
|
# coding: utf-8
import os.path
import pickle
import numpy as np
#from chainer import cuda
import chainer.functions as F
import Util
"""
Image classification with a CNN (posi-nega)
- 5-layer deep neural network
"""
class ImagePredictor:
def __init__(self, gpu=0):
current_dir_path = os.path.dirname(__file__)
self.model_pkl = current_dir_path + '/model/image_cnn.pkl'
self.gpu = gpu
def load_model(self):
'''
        Load the model
'''
model = None
if os.path.exists(self.model_pkl):
with open(self.model_pkl, 'rb') as pkl:
model = pickle.load(pkl)
return model
def makeGpuAvailable(self, model):
        # Whether to use the GPU
if self.gpu >= 0:
pass
#cuda.check_cuda_available()
#cuda.get_device(self.gpu).use()
#model.to_gpu()
#xp = np if self.gpu < 0 else cuda.cupy
xp = np
return xp
def predict(self, image_path):
        # Model definition
model = self.load_model()
if model is None:
print("model is empty")
exit()
xp = self.makeGpuAvailable(model)
x = Util.load_image(image_path)
x = xp.asarray(x.reshape((1,)+x.shape))
pred_y = F.softmax(model.predictor(x).data).data
for i, p in enumerate(pred_y[0]):
print("[{0:02d}]:{1:.3f}%".format(i, float(p)))
y = xp.argmax(pred_y[0])
return y
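# Minimal usage sketch (illustrative only). It assumes a trained model pickle
# exists at model/image_cnn.pkl and that "sample.jpg" is a readable image;
# kept as comments so nothing runs at import time.
#     predictor = ImagePredictor(gpu=-1)   # gpu < 0 keeps the CPU/numpy path
#     label = predictor.predict("sample.jpg")
#     print("predicted class:", label)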
|
mit
| 6,428,009,083,430,815,000
| 23.810345
| 66
| 0.540653
| false
| 3.016771
| false
| false
| false
|
oscarpilote/Ortho4XP
|
src/O4_Mesh_Utils.py
|
1
|
24769
|
import time
import sys
import os
import pickle
import subprocess
import numpy
import requests
from math import sqrt, cos, pi
import O4_DEM_Utils as DEM
import O4_UI_Utils as UI
import O4_File_Names as FNAMES
import O4_Geo_Utils as GEO
import O4_Vector_Utils as VECT
import O4_OSM_Utils as OSM
import O4_Version
if 'dar' in sys.platform:
Triangle4XP_cmd = os.path.join(FNAMES.Utils_dir,"Triangle4XP.app ")
triangle_cmd = os.path.join(FNAMES.Utils_dir,"triangle.app ")
sort_mesh_cmd = os.path.join(FNAMES.Utils_dir,"moulinette.app ")
unzip_cmd = "7z "
elif 'win' in sys.platform:
Triangle4XP_cmd = os.path.join(FNAMES.Utils_dir,"Triangle4XP.exe ")
triangle_cmd = os.path.join(FNAMES.Utils_dir,"triangle.exe ")
sort_mesh_cmd = os.path.join(FNAMES.Utils_dir,"moulinette.exe ")
unzip_cmd = os.path.join(FNAMES.Utils_dir,"7z.exe ")
else:
Triangle4XP_cmd = os.path.join(FNAMES.Utils_dir,"Triangle4XP ")
triangle_cmd = os.path.join(FNAMES.Utils_dir,"triangle ")
sort_mesh_cmd = os.path.join(FNAMES.Utils_dir,"moulinette ")
unzip_cmd = "7z "
community_server=False
if os.path.exists(os.path.join(FNAMES.Ortho4XP_dir,"community_server.txt")):
try:
f=open(os.path.join(FNAMES.Ortho4XP_dir,"community_server.txt"),'r')
for line in f.readlines():
line=line.strip()
if not line: continue
if '#' in line:
if line[0]=='#': continue
else: line=line.split('#')[0].strip()
if not line: continue
community_server=True
community_prefix=line
break
except:
pass
def community_mesh(tile):
if not community_server:
UI.exit_message_and_bottom_line("\nERROR: No community server defined in community_server.txt")
return 0
url=community_prefix+os.path.basename(FNAMES.mesh_file(tile.build_dir,tile.lat,tile.lon))+'.7z'
timer=time.time()
UI.vprint(0,"Querying",url,"...")
try:
r=requests.get(url,timeout=30)
if '[200]' in str(r):
UI.vprint(0,"We've got something !")
f=open(FNAMES.mesh_file(tile.build_dir,tile.lat,tile.lon)+'.7z','wb')
f.write(r.content)
f.close()
if subprocess.call([unzip_cmd.strip(),'e','-y','-o'+tile.build_dir,FNAMES.mesh_file(tile.build_dir,tile.lat,tile.lon)+".7z"]):
UI.exit_message_and_bottom_line("\nERROR: Could not extract community_mesh from archive.")
return 0
os.remove(FNAMES.mesh_file(tile.build_dir,tile.lat,tile.lon)+'.7z')
UI.timings_and_bottom_line(timer)
return 1
elif '[40' in str(r):
UI.exit_message_and_bottom_line("\nSORRY: Community server does not propose that mesh: "+str(r))
return 0
elif '[50' in str(r):
UI.exit_message_and_bottom_line("\nSORRY: Community server seems to be down or struggling: "+str(r))
return 0
else:
UI.exit_message_and_bottom_line("\nSORRY: Community server seems to be down or struggling: "+str(r))
return 0
except Exception as e:
UI.exit_message_and_bottom_line("\nERROR: Network or server unreachable:\n"+str(e))
return 0
##############################################################################
def is_in_region(lat,lon,latmin,latmax,lonmin,lonmax):
return lat>=latmin and lat<=latmax and lon>=lonmin and lon<=lonmax
##############################################################################
##############################################################################
def build_curv_tol_weight_map(tile,weight_array):
if tile.apt_curv_tol!=tile.curvature_tol and tile.apt_curv_tol>0:
UI.vprint(1,"-> Modifying curv_tol weight map according to runway locations.")
try:
f=open(FNAMES.apt_file(tile),'rb')
dico_airports=pickle.load(f)
f.close()
except:
UI.vprint(1," WARNING: File",FNAMES.apt_file(tile),"is missing (erased after Step 1?), cannot check airport info for upgraded zoomlevel.")
dico_airports={}
for airport in dico_airports:
(xmin,ymin,xmax,ymax)=dico_airports[airport]['boundary'].bounds
x_shift=1000*tile.apt_curv_ext*GEO.m_to_lon(tile.lat)
y_shift=1000*tile.apt_curv_ext*GEO.m_to_lat
colmin=max(round((xmin-x_shift)*1000),0)
colmax=min(round((xmax+x_shift)*1000),1000)
rowmax=min(round(((1-ymin)+y_shift)*1000),1000)
rowmin=max(round(((1-ymax)-y_shift)*1000),0)
weight_array[rowmin:rowmax+1,colmin:colmax+1]=tile.curvature_tol/tile.apt_curv_tol
if tile.coast_curv_tol!=tile.curvature_tol:
UI.vprint(1,"-> Modifying curv_tol weight map according to coastline location.")
sea_layer=OSM.OSM_layer()
custom_coastline=FNAMES.custom_coastline(tile.lat, tile.lon)
custom_coastline_dir=FNAMES.custom_coastline_dir(tile.lat, tile.lon)
if os.path.isfile(custom_coastline):
UI.vprint(1," * User defined custom coastline data detected.")
sea_layer.update_dicosm(custom_coastline,input_tags=None,target_tags=None)
elif os.path.isdir(custom_coastline_dir):
UI.vprint(1," * User defined custom coastline data detected (multiple files).")
for osm_file in os.listdir(custom_coastline_dir):
UI.vprint(2," ",osm_file)
sea_layer.update_dicosm(os.path.join(custom_coastline_dir,osm_file),input_tags=None,target_tags=None)
sea_layer.write_to_file(custom_coastline)
else:
queries=['way["natural"="coastline"]']
tags_of_interest=[]
if not OSM.OSM_queries_to_OSM_layer(queries,sea_layer,tile.lat,tile.lon,tags_of_interest,cached_suffix='coastline'):
return 0
for nodeid in sea_layer.dicosmn:
(lonp,latp)=[float(x) for x in sea_layer.dicosmn[nodeid]]
if lonp<tile.lon or lonp>tile.lon+1 or latp<tile.lat or latp>tile.lat+1: continue
x_shift=1000*tile.coast_curv_ext*GEO.m_to_lon(tile.lat)
y_shift=tile.coast_curv_ext/(111.12)
colmin=max(round((lonp-tile.lon-x_shift)*1000),0)
colmax=min(round((lonp-tile.lon+x_shift)*1000),1000)
rowmax=min(round((tile.lat+1-latp+y_shift)*1000),1000)
rowmin=max(round((tile.lat+1-latp-y_shift)*1000),0)
weight_array[rowmin:rowmax+1,colmin:colmax+1]=numpy.maximum(weight_array[rowmin:rowmax+1,colmin:colmax+1],tile.curvature_tol/tile.coast_curv_tol)
del(sea_layer)
# It could be of interest to write the weight file as a png for user editing
#from PIL import Image
#Image.fromarray((weight_array!=1).astype(numpy.uint8)*255).save('weight.png')
return
##############################################################################
##############################################################################
def post_process_nodes_altitudes(tile):
dico_attributes=VECT.Vector_Map.dico_attributes
f_node = open(FNAMES.output_node_file(tile),'r')
init_line_f_node=f_node.readline()
nbr_pt=int(init_line_f_node.split()[0])
vertices=numpy.zeros(6*nbr_pt)
UI.vprint(1,"-> Loading of the mesh computed by Triangle4XP.")
for i in range(0,nbr_pt):
vertices[6*i:6*i+6]=[float(x) for x in f_node.readline().split()[1:7]]
end_line_f_node=f_node.readline()
f_node.close()
UI.vprint(1,"-> Post processing of altitudes according to vector data")
f_ele = open(FNAMES.output_ele_file(tile),'r')
nbr_tri= int(f_ele.readline().split()[0])
water_tris=set()
sea_tris=set()
interp_alt_tris=set()
for i in range(nbr_tri):
line = f_ele.readline()
        # triangle attributes are powers of 2, except for the dummy attribute which doesn't require post-treatment
if line[-2]=='0': continue
(v1,v2,v3,attr)=[int(x)-1 for x in line.split()[1:5]]
attr+=1
if attr >= dico_attributes['INTERP_ALT']:
interp_alt_tris.add((v1,v2,v3))
elif attr & dico_attributes['SEA']:
sea_tris.add((v1,v2,v3))
elif attr & dico_attributes['WATER'] or attr & dico_attributes['SEA_EQUIV']:
water_tris.add((v1,v2,v3))
if tile.water_smoothing:
UI.vprint(1," Smoothing inland water.")
for j in range(tile.water_smoothing):
for (v1,v2,v3) in water_tris:
zmean=(vertices[6*v1+2]+vertices[6*v2+2]+vertices[6*v3+2])/3
vertices[6*v1+2]=zmean
vertices[6*v2+2]=zmean
vertices[6*v3+2]=zmean
UI.vprint(1," Smoothing of sea water.")
for (v1,v2,v3) in sea_tris:
if tile.sea_smoothing_mode=='zero':
vertices[6*v1+2]=0
vertices[6*v2+2]=0
vertices[6*v3+2]=0
elif tile.sea_smoothing_mode=='mean':
zmean=(vertices[6*v1+2]+vertices[6*v2+2]+vertices[6*v3+2])/3
vertices[6*v1+2]=zmean
vertices[6*v2+2]=zmean
vertices[6*v3+2]=zmean
else:
vertices[6*v1+2]=max(vertices[6*v1+2],0)
vertices[6*v2+2]=max(vertices[6*v2+2],0)
vertices[6*v3+2]=max(vertices[6*v3+2],0)
UI.vprint(1," Treatment of airports, roads and patches.")
for (v1,v2,v3) in interp_alt_tris:
vertices[6*v1+2]=vertices[6*v1+5]
vertices[6*v2+2]=vertices[6*v2+5]
vertices[6*v3+2]=vertices[6*v3+5]
vertices[6*v1+3]=0
vertices[6*v2+3]=0
vertices[6*v3+3]=0
vertices[6*v1+4]=0
vertices[6*v2+4]=0
vertices[6*v3+4]=0
UI.vprint(1,"-> Writing output nodes file.")
f_node = open(FNAMES.output_node_file(tile),'w')
f_node.write(init_line_f_node)
for i in range(0,nbr_pt):
f_node.write(str(i+1)+" "+' '.join(('{:.15f}'.format(x) for x in vertices[6*i:6*i+6]))+"\n")
f_node.write(end_line_f_node)
f_node.close()
return vertices
##############################################################################
##############################################################################
def write_mesh_file(tile,vertices):
UI.vprint(1,"-> Writing final mesh to the file "+FNAMES.mesh_file(tile.build_dir,tile.lat,tile.lon))
f_ele = open(FNAMES.output_ele_file(tile),'r')
nbr_vert=len(vertices)//6
nbr_tri=int(f_ele.readline().split()[0])
f=open(FNAMES.mesh_file(tile.build_dir,tile.lat,tile.lon),"w")
f.write("MeshVersionFormatted "+O4_Version.version+"\n")
f.write("Dimension 3\n\n")
f.write("Vertices\n")
f.write(str(nbr_vert)+"\n")
for i in range(0,nbr_vert):
f.write('{:.7f}'.format(vertices[6*i]+tile.lon)+" "+\
'{:.7f}'.format(vertices[6*i+1]+tile.lat)+" "+\
'{:.7f}'.format(vertices[6*i+2]/100000)+" 0\n")
f.write("\n")
f.write("Normals\n")
f.write(str(nbr_vert)+"\n")
for i in range(0,nbr_vert):
f.write('{:.2f}'.format(vertices[6*i+3])+" "+\
'{:.2f}'.format(vertices[6*i+4])+"\n")
f.write("\n")
f.write("Triangles\n")
f.write(str(nbr_tri)+"\n")
for i in range(0,nbr_tri):
f.write(' '.join(f_ele.readline().split()[1:])+"\n")
f_ele.close()
f.close()
return
##############################################################################
##############################################################################
# Build a textured .obj wavefront over the extent of an orthogrid cell
##############################################################################
def extract_mesh_to_obj(mesh_file,til_x_left,til_y_top,zoomlevel,provider_code):
UI.red_flag=False
timer=time.time()
(latmax,lonmin)=GEO.gtile_to_wgs84(til_x_left,til_y_top,zoomlevel)
(latmin,lonmax)=GEO.gtile_to_wgs84(til_x_left+16,til_y_top+16,zoomlevel)
obj_file_name=FNAMES.obj_file(til_x_left,til_y_top,zoomlevel,provider_code)
mtl_file_name=FNAMES.mtl_file(til_x_left,til_y_top,zoomlevel,provider_code)
f_mesh=open(mesh_file,"r")
for i in range(4):
f_mesh.readline()
nbr_pt_in=int(f_mesh.readline())
UI.vprint(1," Reading nodes...")
pt_in=numpy.zeros(5*nbr_pt_in,'float')
for i in range(nbr_pt_in):
pt_in[5*i:5*i+3]=[float(x) for x in f_mesh.readline().split()[:3]]
for i in range(3):
f_mesh.readline()
for i in range(nbr_pt_in):
pt_in[5*i+3:5*i+5]=[float(x) for x in f_mesh.readline().split()[:2]]
for i in range(0,2): # skip 2 lines
f_mesh.readline()
if UI.red_flag: UI.exit_message_and_bottom_line(); return 0
UI.vprint(1," Reading triangles...")
nbr_tri_in=int(f_mesh.readline()) # read nbr of tris
textured_nodes={}
textured_nodes_inv={}
nodes_st_coord={}
len_textured_nodes=0
dico_new_tri={}
len_dico_new_tri=0
for i in range(0,nbr_tri_in):
(n1,n2,n3)=[int(x)-1 for x in f_mesh.readline().split()[:3]]
(lon1,lat1,z1,u1,v1)=pt_in[5*n1:5*n1+5]
(lon2,lat2,z2,u2,v2)=pt_in[5*n2:5*n2+5]
(lon3,lat3,z3,u3,v3)=pt_in[5*n3:5*n3+5]
if is_in_region((lat1+lat2+lat3)/3.0,(lon1+lon2+lon3)/3.0,latmin,latmax,lonmin,lonmax):
if n1 not in textured_nodes_inv:
len_textured_nodes+=1
textured_nodes_inv[n1]=len_textured_nodes
textured_nodes[len_textured_nodes]=n1
nodes_st_coord[len_textured_nodes]=GEO.st_coord(lat1,lon1,til_x_left,til_y_top,zoomlevel,provider_code)
n1new=textured_nodes_inv[n1]
if n2 not in textured_nodes_inv:
len_textured_nodes+=1
textured_nodes_inv[n2]=len_textured_nodes
textured_nodes[len_textured_nodes]=n2
nodes_st_coord[len_textured_nodes]=GEO.st_coord(lat2,lon2,til_x_left,til_y_top,zoomlevel,provider_code)
n2new=textured_nodes_inv[n2]
if n3 not in textured_nodes_inv:
len_textured_nodes+=1
textured_nodes_inv[n3]=len_textured_nodes
textured_nodes[len_textured_nodes]=n3
nodes_st_coord[len_textured_nodes]=GEO.st_coord(lat3,lon3,til_x_left,til_y_top,zoomlevel,provider_code)
n3new=textured_nodes_inv[n3]
dico_new_tri[len_dico_new_tri]=(n1new,n2new,n3new)
len_dico_new_tri+=1
nbr_vert=len_textured_nodes
nbr_tri=len_dico_new_tri
if UI.red_flag: UI.exit_message_and_bottom_line(); return 0
UI.vprint(1," Writing the obj file.")
# first the obj file
f=open(obj_file_name,"w")
for i in range(1,nbr_vert+1):
j=textured_nodes[i]
f.write("v "+'{:.9f}'.format(pt_in[5*j]-lonmin)+" "+\
'{:.9f}'.format(pt_in[5*j+1]-latmin)+" "+\
'{:.9f}'.format(pt_in[5*j+2])+"\n")
f.write("\n")
for i in range(1,nbr_vert+1):
j=textured_nodes[i]
f.write("vn "+'{:.9f}'.format(pt_in[5*j+3])+" "+'{:.9f}'.format(pt_in[5*j+4])+" "+'{:.9f}'.format(sqrt(max(1-pt_in[5*j+3]**2-pt_in[5*j+4]**2,0)))+"\n")
f.write("\n")
for i in range(1,nbr_vert+1):
j=textured_nodes[i]
f.write("vt "+'{:.9f}'.format(nodes_st_coord[i][0])+" "+\
'{:.9f}'.format(nodes_st_coord[i][1])+"\n")
f.write("\n")
f.write("usemtl orthophoto\n\n")
for i in range(0,nbr_tri):
(one,two,three)=dico_new_tri[i]
f.write("f "+str(one)+"/"+str(one)+"/"+str(one)+" "+str(two)+"/"+str(two)+"/"+str(two)+" "+str(three)+"/"+str(three)+"/"+str(three)+"\n")
f_mesh.close()
f.close()
# then the mtl file
f=open(mtl_file_name,'w')
f.write("newmtl orthophoto\nmap_Kd "+FNAMES.geotiff_file_name_from_attributes(til_x_left,til_y_top,zoomlevel,provider_code)+"\n")
f.close()
UI.timings_and_bottom_line(timer)
return
##############################################################################
##############################################################################
def build_mesh(tile):
if UI.is_working: return 0
UI.is_working=1
UI.red_flag=False
VECT.scalx=cos((tile.lat+0.5)*pi/180)
UI.logprint("Step 2 for tile lat=",tile.lat,", lon=",tile.lon,": starting.")
UI.vprint(0,"\nStep 2 : Building mesh for tile "+FNAMES.short_latlon(tile.lat,tile.lon)+" : \n--------\n")
UI.progress_bar(1,0)
poly_file = FNAMES.input_poly_file(tile)
node_file = FNAMES.input_node_file(tile)
alt_file = FNAMES.alt_file(tile)
weight_file = FNAMES.weight_file(tile)
if not os.path.isfile(node_file):
UI.exit_message_and_bottom_line("\nERROR: Could not find ",node_file)
return 0
if not tile.iterate and not os.path.isfile(poly_file):
UI.exit_message_and_bottom_line("\nERROR: Could not find ",poly_file)
return 0
if not tile.iterate:
if not os.path.isfile(alt_file):
UI.exit_message_and_bottom_line("\nERROR: Could not find",alt_file,". You must run Step 1 first.")
return 0
try:
fill_nodata = tile.fill_nodata or "to zero"
source= ((";" in tile.custom_dem) and tile.custom_dem.split(";")[0]) or tile.custom_dem
tile.dem=DEM.DEM(tile.lat,tile.lon,source,fill_nodata,info_only=True)
if not os.path.getsize(alt_file)==4*tile.dem.nxdem*tile.dem.nydem:
UI.exit_message_and_bottom_line("\nERROR: Cached raster elevation does not match the current custom DEM specs.\n You must run Step 1 and Step 2 with the same elevation base.")
return 0
except Exception as e:
print(e)
UI.exit_message_and_bottom_line("\nERROR: Could not determine the appropriate source. Please check your custom_dem entry.")
return 0
else:
try:
source= ((";" in tile.custom_dem) and tile.custom_dem.split(";")[tile.iterate]) or tile.custom_dem
tile.dem=DEM.DEM(tile.lat,tile.lon,source,fill_nodata=False,info_only=True)
if not os.path.isfile(alt_file) or not os.path.getsize(alt_file)==4*tile.dem.nxdem*tile.dem.nydem:
tile.dem=DEM.DEM(tile.lat,tile.lon,source,fill_nodata=False,info_only=False)
tile.dem.write_to_file(FNAMES.alt_file(tile))
except Exception as e:
print(e)
UI.exit_message_and_bottom_line("\nERROR: Could not determine the appropriate source. Please check your custom_dem entry.")
return 0
try:
f=open(node_file,'r')
input_nodes=int(f.readline().split()[0])
f.close()
except:
UI.exit_message_and_bottom_line("\nERROR: In reading ",node_file)
return 0
timer=time.time()
tri_verbosity = 'Q' if UI.verbosity<=1 else 'V'
output_poly = 'P' if UI.cleaning_level else ''
do_refine = 'r' if tile.iterate else 'A'
limit_tris = 'S'+str(max(int(tile.limit_tris/1.9-input_nodes),0)) if tile.limit_tris else ''
Tri_option = '-p'+do_refine+'uYB'+tri_verbosity+output_poly+limit_tris
weight_array=numpy.ones((1001,1001),dtype=numpy.float32)
build_curv_tol_weight_map(tile,weight_array)
weight_array.tofile(weight_file)
del(weight_array)
curv_tol_scaling=sqrt(tile.dem.nxdem/(1000*(tile.dem.x1-tile.dem.x0)))
hmin_effective=max(tile.hmin,(tile.dem.y1-tile.dem.y0)*GEO.lat_to_m/tile.dem.nydem/2)
mesh_cmd=[Triangle4XP_cmd.strip(),
Tri_option.strip(),
'{:.9g}'.format(GEO.lon_to_m(tile.lat)),
'{:.9g}'.format(GEO.lat_to_m),
'{:n}'.format(tile.dem.nxdem),
'{:n}'.format(tile.dem.nydem),
'{:.9g}'.format(tile.dem.x0),
'{:.9g}'.format(tile.dem.y0),
'{:.9g}'.format(tile.dem.x1),
'{:.9g}'.format(tile.dem.y1),
'{:.9g}'.format(tile.dem.nodata),
'{:.9g}'.format(tile.curvature_tol*curv_tol_scaling),
'{:.9g}'.format(tile.min_angle),str(hmin_effective),alt_file,weight_file,poly_file]
del(tile.dem) # for machines with not much RAM, we do not need it anymore
tile.dem=None
UI.vprint(1,"-> Start of the mesh algorithm Triangle4XP.")
UI.vprint(2,' Mesh command:',' '.join(mesh_cmd))
fingers_crossed=subprocess.Popen(mesh_cmd,stdout=subprocess.PIPE,bufsize=0)
while True:
line = fingers_crossed.stdout.readline()
if not line:
break
else:
try:
print(line.decode("utf-8")[:-1])
except:
pass
time.sleep(0.3)
fingers_crossed.poll()
if fingers_crossed.returncode:
UI.vprint(0,"\nWARNING: Triangle4XP could not achieve the requested quality (min_angle), most probably due to an uncatched OSM error.\n"+\
"It will be tempted now with no angle constraint (i.e. min_angle=0).")
mesh_cmd[-5]='{:.9g}'.format(0)
fingers_crossed=subprocess.Popen(mesh_cmd,stdout=subprocess.PIPE,bufsize=0)
while True:
line = fingers_crossed.stdout.readline()
if not line:
break
else:
try:
print(line.decode("utf-8")[:-1])
except:
pass
time.sleep(0.3)
fingers_crossed.poll()
if fingers_crossed.returncode:
UI.exit_message_and_bottom_line("\nERROR: Triangle4XP really couldn't make it !\n\n"+\
"If the reason is not due to the limited amount of RAM please\n"+\
"file a bug including the .node and .poly files that you\n"+\
"will find in "+str(tile.build_dir)+".\n")
return 0
if UI.red_flag: UI.exit_message_and_bottom_line(); return 0
vertices=post_process_nodes_altitudes(tile)
if UI.red_flag: UI.exit_message_and_bottom_line(); return 0
write_mesh_file(tile,vertices)
#
if UI.cleaning_level:
try: os.remove(FNAMES.weight_file(tile))
except: pass
try: os.remove(FNAMES.output_node_file(tile))
except: pass
try: os.remove(FNAMES.output_ele_file(tile))
except: pass
if UI.cleaning_level>2:
try: os.remove(FNAMES.alt_file(tile))
except: pass
try: os.remove(FNAMES.input_node_file(tile))
except: pass
try: os.remove(FNAMES.input_poly_file(tile))
except: pass
UI.timings_and_bottom_line(timer)
UI.logprint("Step 2 for tile lat=",tile.lat,", lon=",tile.lon,": normal exit.")
return 1
##############################################################################
##############################################################################
def sort_mesh(tile):
if UI.is_working: return 0
UI.is_working=1
UI.red_flag=False
mesh_file = FNAMES.mesh_file(tile.build_dir,tile.lat,tile.lon)
if not os.path.isfile(mesh_file):
UI.exit_message_and_bottom_line("\nERROR: Could not find ",mesh_file)
return 0
sort_mesh_cmd_list=[sort_mesh_cmd.strip(),str(tile.default_zl),mesh_file]
UI.vprint(1,"-> Reorganizing mesh triangles.")
timer=time.time()
moulinette=subprocess.Popen(sort_mesh_cmd_list,stdout=subprocess.PIPE,bufsize=0)
while True:
line = moulinette.stdout.readline()
if not line:
break
else:
print(line.decode("utf-8")[:-1])
UI.timings_and_bottom_line(timer)
UI.logprint("Moulinette applied for tile lat=",tile.lat,", lon=",tile.lon," and ZL",tile.default_zl)
return 1
##############################################################################
##############################################################################
def triangulate(name,path_to_Ortho4XP_dir):
Tri_option = ' -pAYPQ '
mesh_cmd=[os.path.join(path_to_Ortho4XP_dir,triangle_cmd).strip(),Tri_option.strip(),name+'.poly']
fingers_crossed=subprocess.Popen(mesh_cmd,stdout=subprocess.PIPE,bufsize=0)
while True:
line = fingers_crossed.stdout.readline()
if not line:
break
else:
print(line.decode("utf-8")[:-1])
fingers_crossed.poll()
if fingers_crossed.returncode:
print("\nERROR: triangle crashed, check osm mask data.\n")
return 0
return 1
##############################################################################
|
gpl-3.0
| -8,716,772,008,718,751,000
| 45.039033
| 197
| 0.559772
| false
| 3.142477
| false
| false
| false
|
Treeki/NewerSMBW
|
Koopatlas/src/worldeditor.py
|
1
|
5227
|
from common import *
import re
def editableColourStr(array):
return '#%02X%02X%02X (%d)' % tuple(array)
NICE_STR_RE = re.compile('^#([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})\s*(?:\((\d+)\))?$')
def colourFromNiceStr(thing):
match = NICE_STR_RE.match(thing)
try:
if match:
r,g,b,a = match.groups()
return (int(r,16), int(g,16), int(b,16), int(a) if a != None else 255)
except:
pass
return None
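# Round-trip sketch for the two helpers above (values are illustrative):
# editableColourStr() takes a 4-element (r, g, b, a) sequence, and
# colourFromNiceStr() parses the "#RRGGBB (A)" form back into a tuple.
#     editableColourStr((255, 128, 0, 200))   # -> '#FF8000 (200)'
#     colourFromNiceStr('#FF8000 (200)')      # -> (255, 128, 0, 200)
#     colourFromNiceStr('#FF8000')            # -> (255, 128, 0, 255), alpha defaults to 255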
class KPWorldTableModel(QtCore.QAbstractTableModel):
FIELDS = ('Name', 'World ID', 'Track ID',
'FS Text 1', 'FS Text 2',
'FS Hint 1', 'FS Hint 2',
'HUD Text 1', 'HUD Text 2',
'HUD Hue', 'HUD Saturation', 'HUD Lightness',
'Title Level')
def __init__(self, kpmap, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self.currentMap = kpmap
self.worlds = kpmap.worlds
def columnCount(self, parent):
return len(self.FIELDS)
def headerData(self, section, orientation, role):
if orientation == Qt.Horizontal:
if role == Qt.DisplayRole:
return self.FIELDS[section]
else:
if role == Qt.DisplayRole:
return str(self.worlds[section].uniqueKey)
return QtCore.QVariant()
def rowCount(self, parent):
if parent.isValid():
return 0
else:
return len(self.worlds)
def data(self, index, role):
if index.isValid():
entry = self.worlds[index.row()]
col = index.column()
if role == Qt.DisplayRole or role == Qt.EditRole:
if col == 0:
return entry.name
elif col == 1:
return entry.worldID
elif col == 2:
return entry.musicTrackID
elif col == 3 or col == 4:
return editableColourStr(entry.fsTextColours[col - 3])
elif col == 5 or col == 6:
return editableColourStr(entry.fsHintColours[col - 5])
elif col == 7 or col == 8:
return editableColourStr(entry.hudTextColours[col - 7])
elif col >= 9 and col <= 11:
return entry.hudHintTransform[col - 9]
elif col == 12:
return entry.titleScreenID
if role == Qt.DecorationRole:
if col == 3 or col == 4:
return QtGui.QColor(*entry.fsTextColours[col - 3])
elif col == 5 or col == 6:
return QtGui.QColor(*entry.fsHintColours[col - 5])
elif col == 7 or col == 8:
return QtGui.QColor(*entry.hudTextColours[col - 7])
return QtCore.QVariant()
def flags(self, index):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
def setData(self, index, value, role):
if index.isValid():
if role == Qt.EditRole:
success = False
entry = self.worlds[index.row()]
col = index.column()
if col == 0:
entry.name = str(value.toString())
success = True
elif col == 1:
entry.worldID = str(value.toString())
success = True
elif col == 2:
v,ok = value.toInt()
if ok:
entry.musicTrackID = v
success = True
elif col >= 3 and col <= 8:
newCol = colourFromNiceStr(str(value.toString()))
if newCol:
success = True
if col == 3:
entry.fsTextColours = (newCol, entry.fsTextColours[1])
elif col == 4:
entry.fsTextColours = (entry.fsTextColours[0], newCol)
elif col == 5:
entry.fsHintColours = (newCol, entry.fsHintColours[1])
elif col == 6:
entry.fsHintColours = (entry.fsHintColours[0], newCol)
elif col == 7:
entry.hudTextColours = (newCol, entry.hudTextColours[1])
elif col == 8:
entry.hudTextColours = (entry.hudTextColours[0], newCol)
elif col >= 9 and col <= 11:
v,ok = value.toInt()
if ok:
new = list(entry.hudHintTransform)
new[col - 9] = v
entry.hudHintTransform = new
success = True
elif col == 12:
entry.titleScreenID = str(value.toString())
success = True
if success:
self.dataChanged.emit(index, index)
return success
return False
def addEntryToEnd(self):
self.beginInsertRows(QtCore.QModelIndex(), len(self.worlds), len(self.worlds))
entry = KPWorldDef()
entry.uniqueKey = self.currentMap.allocateWorldDefKey()
self.worlds.append(entry)
self.endInsertRows()
def removeRows(self, row, count, parent):
if not parent.isValid():
if row >= 0 and (row + count) <= len(self.worlds):
self.beginRemoveRows(parent, row, row+count-1)
for i in xrange(count):
del self.worlds[row]
self.endRemoveRows()
class KPWorldEditor(QtGui.QWidget):
def __init__(self, kpmap, parent=None):
QtGui.QWidget.__init__(self, parent, Qt.Window)
self.setWindowTitle('World Editor')
self.dataView = QtGui.QTableView(self)
self.addButton = QtGui.QPushButton('Add', self)
self.removeButton = QtGui.QPushButton('Remove', self)
layout = QtGui.QGridLayout(self)
layout.addWidget(self.dataView, 0, 0, 1, 2)
layout.addWidget(self.addButton, 1, 0, 1, 1)
layout.addWidget(self.removeButton, 1, 1, 1, 1)
self.model = KPWorldTableModel(kpmap, self)
self.dataView.setModel(self.model)
self.addButton.clicked.connect(self.model.addEntryToEnd)
self.removeButton.clicked.connect(self.removeCurrentEntry)
def removeCurrentEntry(self):
what = self.dataView.selectionModel().currentIndex()
if what.isValid():
what = what.row()
key = self.model.worlds[what].uniqueKey
self.model.removeRows(what, 1, QtCore.QModelIndex())
|
mit
| 4,182,107,282,050,427,000
| 27.562842
| 96
| 0.650277
| false
| 2.89745
| false
| false
| false
|
daviddrysdale/pynamo
|
dynamo2.py
|
1
|
10913
|
"""Implementation of Dynamo
Second iteration, adding detection of missing responses and expansion of the preference list."""
import copy
import random
import logging
import logconfig
from node import Node
from framework import Framework
from hash_multiple import ConsistentHashTable
from dynamomessages import ClientPut, ClientGet, ClientPutRsp, ClientGetRsp
from dynamomessages import PutReq, GetReq, PutRsp, GetRsp
from dynamomessages import DynamoRequestMessage
logconfig.init_logging()
_logger = logging.getLogger('dynamo')
# PART dynamonode
class DynamoNode(Node):
timer_priority = 20
T = 10 # Number of repeats for nodes in consistent hash table
N = 3 # Number of nodes to replicate at
W = 2 # Number of nodes that need to reply to a write operation
R = 2 # Number of nodes that need to reply to a read operation
nodelist = []
chash = ConsistentHashTable(nodelist, T)
def __init__(self):
super(DynamoNode, self).__init__()
self.local_store = {} # key => (value, metadata)
self.pending_put_rsp = {} # seqno => set of nodes that have stored
self.pending_put_msg = {} # seqno => original client message
self.pending_get_rsp = {} # seqno => set of (node, value, metadata) tuples
self.pending_get_msg = {} # seqno => original client message
# seqno => set of requests sent to other nodes, for each message class
self.pending_req = {PutReq: {}, GetReq: {}}
self.failed_nodes = []
# Rebuild the consistent hash table
DynamoNode.nodelist.append(self)
DynamoNode.chash = ConsistentHashTable(DynamoNode.nodelist, DynamoNode.T)
# PART reset
@classmethod
def reset(cls):
cls.nodelist = []
cls.chash = ConsistentHashTable(cls.nodelist, cls.T)
# PART storage
def store(self, key, value, metadata):
self.local_store[key] = (value, metadata)
def retrieve(self, key):
if key in self.local_store:
return self.local_store[key]
else:
return (None, None)
# PART rsp_timer_pop
def rsp_timer_pop(self, reqmsg):
# no response to this request; treat the destination node as failed
_logger.info("Node %s now treating node %s as failed", self, reqmsg.to_node)
self.failed_nodes.append(reqmsg.to_node)
failed_requests = Framework.cancel_timers_to(reqmsg.to_node)
failed_requests.append(reqmsg)
for failedmsg in failed_requests:
self.retry_request(failedmsg)
def retry_request(self, reqmsg):
if not isinstance(reqmsg, DynamoRequestMessage):
return
# Send the request to an additional node by regenerating the preference list
preference_list = DynamoNode.chash.find_nodes(reqmsg.key, DynamoNode.N, self.failed_nodes)[0]
kls = reqmsg.__class__
# Check the pending-request list for this type of request message
if kls in self.pending_req and reqmsg.msg_id in self.pending_req[kls]:
for node in preference_list:
if node not in [req.to_node for req in self.pending_req[kls][reqmsg.msg_id]]:
# Found a node on the new preference list that hasn't been sent the request.
# Send it a copy
newreqmsg = copy.copy(reqmsg)
newreqmsg.to_node = node
self.pending_req[kls][reqmsg.msg_id].add(newreqmsg)
Framework.send_message(newreqmsg)
# PART rcv_clientput
def rcv_clientput(self, msg):
preference_list = DynamoNode.chash.find_nodes(msg.key, DynamoNode.N, self.failed_nodes)[0]
# Determine if we are in the list
if self not in preference_list:
# Forward to the coordinator for this key
_logger.info("put(%s=%s) maps to %s", msg.key, msg.value, preference_list)
coordinator = preference_list[0]
Framework.forward_message(msg, coordinator)
else:
# Use an incrementing local sequence number to distinguish
# multiple requests for the same key
seqno = self.generate_sequence_number()
_logger.info("%s, %d: put %s=%s", self, seqno, msg.key, msg.value)
metadata = (self.name, seqno) # For now, metadata is just sequence number at coordinator
# Send out to preference list, and keep track of who has replied
self.pending_req[PutReq][seqno] = set()
self.pending_put_rsp[seqno] = set()
self.pending_put_msg[seqno] = msg
reqcount = 0
for node in preference_list:
# Send message to get node in preference list to store
putmsg = PutReq(self, node, msg.key, msg.value, metadata, msg_id=seqno)
self.pending_req[PutReq][seqno].add(putmsg)
Framework.send_message(putmsg)
reqcount = reqcount + 1
if reqcount >= DynamoNode.N:
# preference_list may have more than N entries to allow for failed nodes
break
# PART rcv_clientget
def rcv_clientget(self, msg):
preference_list = DynamoNode.chash.find_nodes(msg.key, DynamoNode.N, self.failed_nodes)[0]
# Determine if we are in the list
if self not in preference_list:
# Forward to the coordinator for this key
_logger.info("get(%s=?) maps to %s", msg.key, preference_list)
coordinator = preference_list[0]
Framework.forward_message(msg, coordinator)
else:
seqno = self.generate_sequence_number()
self.pending_req[GetReq][seqno] = set()
self.pending_get_rsp[seqno] = set()
self.pending_get_msg[seqno] = msg
reqcount = 0
for node in preference_list:
getmsg = GetReq(self, node, msg.key, msg_id=seqno)
self.pending_req[GetReq][seqno].add(getmsg)
Framework.send_message(getmsg)
reqcount = reqcount + 1
if reqcount >= DynamoNode.N:
# preference_list may have more than N entries to allow for failed nodes
break
# PART rcv_put
def rcv_put(self, putmsg):
_logger.info("%s: store %s=%s", self, putmsg.key, putmsg.value)
self.store(putmsg.key, putmsg.value, putmsg.metadata)
putrsp = PutRsp(putmsg)
Framework.send_message(putrsp)
# PART rcv_putrsp
def rcv_putrsp(self, putrsp):
seqno = putrsp.msg_id
if seqno in self.pending_put_rsp:
self.pending_put_rsp[seqno].add(putrsp.from_node)
if len(self.pending_put_rsp[seqno]) >= DynamoNode.W:
_logger.info("%s: written %d copies of %s=%s so done", self, DynamoNode.W, putrsp.key, putrsp.value)
_logger.debug(" copies at %s", [node.name for node in self.pending_put_rsp[seqno]])
# Tidy up tracking data structures
original_msg = self.pending_put_msg[seqno]
del self.pending_req[PutReq][seqno]
del self.pending_put_rsp[seqno]
del self.pending_put_msg[seqno]
# Reply to the original client
client_putrsp = ClientPutRsp(original_msg)
Framework.send_message(client_putrsp)
else:
pass # Superfluous reply
# PART rcv_get
def rcv_get(self, getmsg):
_logger.info("%s: retrieve %s=?", self, getmsg.key)
(value, metadata) = self.retrieve(getmsg.key)
getrsp = GetRsp(getmsg, value, metadata)
Framework.send_message(getrsp)
# PART rcv_getrsp
def rcv_getrsp(self, getrsp):
seqno = getrsp.msg_id
if seqno in self.pending_get_rsp:
self.pending_get_rsp[seqno].add((getrsp.from_node, getrsp.value, getrsp.metadata))
if len(self.pending_get_rsp[seqno]) >= DynamoNode.R:
_logger.info("%s: read %d copies of %s=? so done", self, DynamoNode.R, getrsp.key)
_logger.debug(" copies at %s", [(node.name, value) for (node, value, _) in self.pending_get_rsp[seqno]])
# Build up all the distinct values/metadata values for the response to the original request
results = set([(value, metadata) for (node, value, metadata) in self.pending_get_rsp[seqno]])
# Tidy up tracking data structures
original_msg = self.pending_get_msg[seqno]
del self.pending_req[GetReq][seqno]
del self.pending_get_rsp[seqno]
del self.pending_get_msg[seqno]
# Reply to the original client, including all received values
client_getrsp = ClientGetRsp(original_msg,
[value for (value, metadata) in results],
[metadata for (value, metadata) in results])
Framework.send_message(client_getrsp)
else:
pass # Superfluous reply
# PART rcvmsg
def rcvmsg(self, msg):
if isinstance(msg, ClientPut):
self.rcv_clientput(msg)
elif isinstance(msg, PutReq):
self.rcv_put(msg)
elif isinstance(msg, PutRsp):
self.rcv_putrsp(msg)
elif isinstance(msg, ClientGet):
self.rcv_clientget(msg)
elif isinstance(msg, GetReq):
self.rcv_get(msg)
elif isinstance(msg, GetRsp):
self.rcv_getrsp(msg)
else:
raise TypeError("Unexpected message type %s", msg.__class__)
# PART get_contents
def get_contents(self):
results = []
for key, value in self.local_store.items():
results.append("%s:%s" % (key, value[0]))
return results
# PART clientnode
class DynamoClientNode(Node):
timer_priority = 17
def put(self, key, metadata, value, destnode=None):
if destnode is None: # Pick a random node to send the request to
destnode = random.choice(DynamoNode.nodelist)
putmsg = ClientPut(self, destnode, key, value, metadata)
Framework.send_message(putmsg)
def get(self, key, destnode=None):
if destnode is None: # Pick a random node to send the request to
destnode = random.choice(DynamoNode.nodelist)
getmsg = ClientGet(self, destnode, key)
Framework.send_message(getmsg)
def rsp_timer_pop(self, reqmsg):
if isinstance(reqmsg, ClientPut): # retry
_logger.info("Put request timed out; retrying")
self.put(reqmsg.key, reqmsg.metadata, reqmsg.value)
elif isinstance(reqmsg, ClientGet): # retry
_logger.info("Get request timed out; retrying")
self.get(reqmsg.key)
# PART clientrcvmsg
def rcvmsg(self, msg):
pass # Client does nothing with results
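# --- Added illustration: a hedged usage sketch, not part of the original module ---
# Assumes the surrounding simulation framework is already set up (DynamoNode.nodelist
# populated and the Framework event loop delivering messages), as done elsewhere in
# this package; constructor details of the client node are deliberately left out.
def _example_put_then_get(client):
    """Drive one put/get round trip through an existing DynamoClientNode."""
    # ClientPut is routed to a random DynamoNode, which fans out PutReq messages
    # and answers with ClientPutRsp once W PutRsp replies have arrived.
    client.put('K1', metadata=None, value='v1')
    # ClientGet follows the same pattern with GetReq/GetRsp and the R read quorum.
    client.get('K1')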
|
gpl-2.0
| 6,073,878,917,768,870,000
| 43.004032
| 121
| 0.607441
| false
| 3.802439
| false
| false
| false
|
3WiseMen/python
|
pystock/pystock_xingAPI/xarequest.py
|
1
|
23907
|
#xarequest.py
import os
import threading
import time
import win32com.client
from .abstract_component import AbstractQueryProviderComponent
from .abstract_component import AbstractSubscriptionProviderComponent
class XAQueryEvents:
def __init__(self):
self._event_object_connector = XAQueryEvents.event_object_connector
def OnReceiveData(self, szTrCode):
try:
self._event_object_connector.logger.debug("OnReceiveData("+str(szTrCode)+")")
self._event_object_connector.on_receive_data_arg = szTrCode
#Originally the outblock access code should live in this thread, but the whole query call blocks,
#so no further query goes out and it stays safe even if control crosses into another thread.
#The subscribe case, however, is different.
except Exception as ex:
self._event_object_connector.logger.warn("OnReceiveData error: %s", ex)
self._event_object_connector.logger.debug(ex, exc_info=True)
return None
finally:
#? is a tr_code check needed here?
self._event_object_connector.on_receive_data_event.set()
def OnReceiveMessage(self, bIsSystemError, szMessageCode, szMessage):
try:
self._event_object_connector.logger.debug("OnReceiveMessage("+", ".join([str(bIsSystemError), str(szMessageCode), str(szMessage)])+")")
self._event_object_connector.on_receive_message_arg = (bIsSystemError, szMessageCode, szMessage)
except Exception as ex:
self._event_object_connector.logger.warn("OnReceiveMessage error: %s", ex)
self._event_object_connector.logger.debug(ex, exc_info=True)
return None
finally:
self._event_object_connector.on_receive_message_event.set()
class XARealEvents:
def __init__(self):
self._event_object_connector = XARealEvents.event_object_connector
def OnReceiveRealData(self, *args):
start_time = time.time()
try:
self._event_object_connector.logger.debug("OnReceiveRealData("+str(args)+")")
outblock = self._read_outblocks(self._event_object_connector.res_blocks)
self._event_object_connector.logger.debug(str(outblock))
self._event_object_connector.queue.put(outblock)
except Exception as ex:
self._event_object_connector.logger.warn("OnReceiveRealData error: %s", ex)
self._event_object_connector.logger.debug(ex, exc_info=True)
return None
finally:
self._event_object_connector.logger.debug('[It took %fs]', time.time() - start_time)
pass
def _read_outblocks(self, res_blocks, comp_yn_flag = False):
outblocks = filter(lambda b:not b['is_input'], res_blocks)
ret = dict()
for block in outblocks:
if not block['is_occurs']:
sub_ret = dict()
for arg in block['args']:
sub_ret[arg['code']] = self._event_object_connector.xaquery_xareal.GetFieldData(block['bname'], arg['code'])
else:
sub_ret = list()
block_count = 0
if comp_yn_flag: # if compressed?
decompressed_size = self._event_object_connector.xaquery_xareal.Decompress(block['bname'])
if decompressed_size > 0:
block_count = self._event_object_connector.xaquery_xareal.GetBlockCount(block['bname'])
else:
block_count = self._event_object_connector.xaquery_xareal.GetBlockCount(block['bname'])
for occur in range(block_count):
sub_sub_ret = dict()
for arg in block['args']:
sub_sub_ret[arg['code']] = self._event_object_connector.xaquery_xareal.GetFieldData(block['bname'], arg['code'])
sub_ret.append(sub_sub_ret)
ret[block['bname']] = sub_ret
return ret
class EventObjectConnector:
logger = None
class XARequest(AbstractQueryProviderComponent, AbstractSubscriptionProviderComponent):
def __init__(self, xasession, res_info, query_block_timeout):
self._res_info = res_info
self._xasession = xasession
self.query_block_timeout = float(query_block_timeout)
def init(self):
self.logger.info("Initializing XARequest_" + self._res_info['header']['tr_code'])
self.event_object_connector = EventObjectConnector()
self.event_object_connector.logger = self.logger.getChild(self._res_info['header']['tr_code'] + "_events")
if self._res_info['header']['is_query']:
self._init_xaquery()
else:
self._init_xareal()
self.logger.debug('self.xaquery_xareal.GetTrCode():%s', self.xaquery_xareal.GetTrCode())
def getAvailableQueryCodeSet(self):
if self._res_info['header']['is_query']:
return {'xing.'+self._res_info['header']['tr_code']}
else:
return {}
def getAvailableSubscriptionCodeSet(self):
if not self._res_info['header']['is_query']:
return {'xing.'+self._res_info['header']['tr_code']}
else:
return {}
def query(self, query_code, arg_set):
try:
if not self._res_info['header']['is_query']:
return None
if query_code.startswith('xing.'):
query_code = query_code.split('.')[1]
else:
return None
self.logger.info("query has been received(%s)", query_code)
self._verify_arguments(query_code, arg_set)
#self.logger.debug("arguments verified")
# actual inblock handling
comp_yn_flag = self._write_inblocks(arg_set)
self.event_object_connector.on_receive_message_event.clear()
self.event_object_connector.on_receive_data_event.clear()
continu_query = True if arg_set.get('continue_query') else False
#2015-05-07 check the period limit first, then the per-second call limit
if self.limit_period != 0:
current_time = time.time()
#The original approach recorded every TR request time in a list and detected over-limit requests by pruning only entries older than exactly 10 minutes
#self.last_calls = list(filter(lambda x:x>=(current_time-self.limit_period), self.last_calls))
#2015-05-26 xingAPI's period-limit check turned out to be simpler than expected, so the avoidance logic was reimplemented to match (the original approach is the line above)
#According to eBEST, the time of the first TR request is recorded and requests are checked against 200 calls within 10 minutes; after 10 minutes the window restarts from the next first TR request
if len(self.last_calls) > 0 and current_time - self.last_calls[0] > self.limit_period:
self.last_calls = []
if len(self.last_calls) >= self.limit_call_count:
#limit hit; work out how long to sleep
tmp_sleep_time = self.last_calls[0] + self.limit_period - current_time
self.last_calls = []
if tmp_sleep_time > 0:
self.logger.debug('sleep for period limit:'+str(tmp_sleep_time))
time.sleep(tmp_sleep_time)
# respect the per-second request limit
tmp_sleep_time = self.last_call + self.minimum_interval_between_calls - time.time()
if tmp_sleep_time > 0: # computing the sleep time beats looping on 0.01s sleeps - 2015-05-07
self.logger.debug('sleep:'+str(tmp_sleep_time))
time.sleep(tmp_sleep_time)
#while time.time() - self.last_call < self.minimum_interval_between_calls:
#self.logger.debug('sleep:'+str(0.01))
#time.sleep(0.01)
request_ret = self.xaquery_xareal.Request(continu_query)
# error handling based on the return value
while request_ret < 0:
if request_ret in [-21,-15,-16,-17]:
#errors that should be retried until they succeed
self.logger.warn("Warning request_ret:"+str(request_ret))
time.sleep(self.minimum_interval_between_calls + 0.01)
request_ret = self.xaquery_xareal.Request(continu_query)
elif request_ret in [-1,-2,-3,-4,-7,-13,-14,-15]:
#errors that require restarting the xasession
self.logger.error("Error request_ret:"+str(request_ret))
self._xasession.reconnect()
break
elif request_ret in [-5,-6,-8,-9,-10,-11,-12,-18,-19,-20,-22,-23,-24,-25]:
#unrecoverable errors; these should be reported by email
self.logger.critical("Critical request_ret:"+str(request_ret))
#TODO add shutdown code
# exit()
#for now, attempt to reconnect
self.logger.error("Error request_ret:"+str(request_ret))
self._xasession.reconnect()
break
self.logger.debug("request_ret:"+str(request_ret))
#TODO error handling is a must!
if request_ret < 0:
self.logger.warn("Request return:"+str(request_ret))
return None
else:
if not self.event_object_connector.on_receive_message_event.wait(self.query_block_timeout):
#timeout
self._xasession.reconnect()
return None
self.last_call = time.time()
#2015-05-07 record the call time
if self.limit_period != 0:
self.last_calls.append(time.time())
#self.event_object_connector.on_receive_message_arg = (bIsSystemError, szMessageCode, szMessage)
if not self.event_object_connector.on_receive_data_event.wait(self.query_block_timeout):
#timeout
self._xasession.reconnect()
return None
return self._read_outblocks(self._res_info['block'], comp_yn_flag)
except Exception as ex:
self.logger.warn("XARequest_" + self._res_info['header']['tr_code'] + " query error: %s", ex)
self.logger.debug(ex, exc_info=True)
return None
finally:
pass
def subscribe(self, subscribe_code, arg_set, queue):
try:
if self._res_info['header']['is_query']:
return None
if subscribe_code.startswith('xing.'):
subscribe_code = subscribe_code.split('.')[1]
else:
return None
self.logger.info("subscribe has been received(%s)", subscribe_code)
self._verify_arguments(subscribe_code, arg_set)
#self.logger.debug("arguments verified")
if self._subscribe_key_code is not None:
key = list(arg_set.values())[0][self._subscribe_key_code]
if self.event_object_connector.queue.register(queue, key):
self._write_inblocks_for_subscription(arg_set)
self.xaquery_xareal.AdviseRealData()
self.logger.debug("Actual AdviseRealData called(key=%s)", key)
else:
self.logger.debug("Subscription add to existing queue(key=%s)", key)
else:
if self.event_object_connector.queue.register(queue):
self._write_inblocks_for_subscription(arg_set)
self.xaquery_xareal.AdviseRealData()
self.logger.debug("Actual AdviseRealData called(no key)")
else:
self.logger.debug("Subscription add to existing queue(no key)")
return True
except Exception as ex:
self.logger.warn("XARequest_" + self._res_info['header']['tr_code'] + " subscribe error: %s", ex)
self.logger.debug(ex, exc_info=True)
return None
finally:
pass
def unsubscribe(self, subscribe_code, arg_set, queue):
try:
if self._res_info['header']['is_query']:
return None
if subscribe_code.startswith('xing.'):
subscribe_code = subscribe_code.split('.')[1]
else:
return None
self.logger.info("unsubscribe has been received(%s)", subscribe_code)
self._verify_arguments(subscribe_code, arg_set)
#self.logger.debug("arguments verified")
if self._subscribe_key_code is not None:
self.logger.debug("%s has a key", subscribe_code)
key = list(arg_set.values())[0][self._subscribe_key_code]
self.logger.debug("unregister from queue")
if self.event_object_connector.queue.unregister(queue, key):
self.logger.debug("call UnadviseRealDataWithKey(%s)", key)
self.xaquery_xareal.UnadviseRealDataWithKey(key)
else:
self.logger.debug("%s has no key", subscribe_code)
if self.event_object_connector.queue.unregister(queue):
self.xaquery_xareal.AdviseRealData()
#self.logger.debug("unsubscribe finished")
return True
except Exception as ex:
self.logger.warn("XARequest_" + self._res_info['header']['tr_code'] + " unsubscribe error: %s", ex)
self.logger.warn(ex, exc_info=True)
return None
finally:
pass
# internal methods
def _write_inblocks(self, arg_set):
comp_yn_flag = False
for block_name in arg_set.keys():
if block_name == 'continue_query':
continue # it's not real inblock
if isinstance(arg_set[block_name], dict): # non-occurs
for arg_code in arg_set[block_name].keys():
if arg_set[block_name][arg_code] is not None:
self.xaquery_xareal.SetFieldData(block_name, arg_code, 0, arg_set[block_name][arg_code])
if (not comp_yn_flag) and arg_code.lower() == 'comp_yn' and str(arg_set[block_name][arg_code]) == 'Y':
comp_yn_flag = True # compress
else: # occurs
block_count = len(arg_set[block_name])
self.xaquery_xareal.SetBlockCount(block_name, block_count)
for i, arg_set1 in enumerate(arg_set[block_name]):
for arg_code in arg_set1.keys():
self.xaquery_xareal.SetFieldData(block_name, arg_code, i, arg_set1[arg_code])
return comp_yn_flag
def _read_outblocks(self, res_blocks, comp_yn_flag = False):
outblocks = filter(lambda b:not b['is_input'], res_blocks)
ret = dict()
for block in outblocks:
if not block['is_occurs']:
sub_ret = dict()
for arg in block['args']:
sub_ret[arg['code']] = self.xaquery_xareal.GetFieldData(block['bname'], arg['code'], 0)
else:
sub_ret = list()
block_count = 0
if comp_yn_flag: # if compressed?
decompressed_size = self.xaquery_xareal.Decompress(block['bname'])
if decompressed_size > 0:
block_count = self.xaquery_xareal.GetBlockCount(block['bname'])
else:
block_count = self.xaquery_xareal.GetBlockCount(block['bname'])
for occur in range(block_count):
sub_sub_ret = dict()
for arg in block['args']:
sub_sub_ret[arg['code']] = self.xaquery_xareal.GetFieldData(block['bname'], arg['code'], occur)
sub_ret.append(sub_sub_ret)
ret[block['bname']] = sub_ret
return ret
def _verify_arguments(self, tr_code, arg_set):
# begin validating the received arg_set
if self._res_info['header']['tr_code'] != tr_code:
raise Exception('Wrong tr-code has been received (%s)' % tr_code)
inblocks = list(filter(lambda b:b['is_input'], self._res_info['block']))
arg_set_key_set = set(arg_set.keys())
inblocks_bname_set = set(map(lambda b:b['bname'], inblocks))
if arg_set_key_set-inblocks_bname_set-{'continue_query'}:
raise Exception('Unsupported inblock name has been received (%s)' % (arg_set_key_set-inblocks_bname_set))
for block_name in arg_set.keys():
if block_name == 'continue_query':
continue
inblock = list(filter(lambda bn:bn['bname']==block_name, inblocks))[0]
if inblock['is_occurs'] and isinstance(arg_set[block_name], dict):
raise Exception("Unexpected dict('{}') for occurs found, list('[]]') should be here instead")
if not inblock['is_occurs'] and isinstance(arg_set[block_name], list):
raise Exception("Unexpected list('[]') for non-occurs found, dict('{}') should be here instead")
if isinstance(arg_set[block_name], dict):
arg_set_keys = set(arg_set[block_name].keys())
else:
arg_set_keys = set()
for a in arg_set[block_name]:
arg_set_keys.update(set(a.keys()))
arg_code_set = set(map(lambda b:b['code'], inblock['args']))
if arg_set_keys-arg_code_set:
raise Exception('Unsupported argument code has been received (%s)' % str(arg_set_keys-arg_code_set))
# end of arg_set validation
def _init_xaquery(self):
self.event_object_connector.on_receive_data_event = threading.Event()
self.event_object_connector.on_receive_message_event = threading.Event()
XAQueryEvents.event_object_connector = self.event_object_connector
self.xaquery_xareal = win32com.client.DispatchWithEvents("XA_DataSet.XAQuery",XAQueryEvents)
self.xaquery_xareal.LoadFromResFile(os.path.join(self._xasession.res_dir_path, self._res_info['header']['tr_code']+'.res'))
#print(dir(self.xaquery_xareal.GetTRCountPerSec.__self__))
count_per_sec_limit = self.xaquery_xareal.GetTRCountPerSec(self._res_info['header']['tr_code'])
self.logger.debug("self.xaquery_xareal.GetTRCountPerSec(%s)", self._res_info['header']['tr_code'])
self.logger.debug('count_per_sec_limit(%s):%s', self._res_info['header']['tr_code'], str(count_per_sec_limit))
#2015-05-26 added request limiting using the newly introduced GetTRCountBaseSec API
count_per_sec_base = self.xaquery_xareal.GetTRCountBaseSec(self._res_info['header']['tr_code'])
self.logger.debug("self.xaquery_xareal.GetTRCountBaseSec(%s)", self._res_info['header']['tr_code'])
self.logger.debug('count_per_sec_base(%s):%s', self._res_info['header']['tr_code'], str(count_per_sec_base))
if count_per_sec_limit:
self.minimum_interval_between_calls = count_per_sec_base / count_per_sec_limit
else:
self.minimum_interval_between_calls = 0
self.logger.debug('self.minimum_interval_between_calls:%s', str(self.minimum_interval_between_calls))
self.last_call = 0
#2015-05-07 set up variables for the period limit
self.limit_call_count, self.limit_period = self._getTrCountPerPeriod(self._res_info['header']['tr_code'])
#no more than self.limit_call_count calls are allowed within self.limit_period (possible, but penalized with an extra delay)
self.last_calls = []
def _init_xareal(self):
XARealEvents.event_object_connector = self.event_object_connector
self.xaquery_xareal = win32com.client.DispatchWithEvents("XA_DataSet.XAReal",XARealEvents)
self.xaquery_xareal.LoadFromResFile(os.path.join(self._xasession.res_dir_path, self._res_info['header']['tr_code']+'.res'))
self.event_object_connector.res_blocks = self._res_info['block']
self.event_object_connector.xaquery_xareal = self.xaquery_xareal
#a subscription has only one inblock
args = list(filter(lambda b:b['is_input'], self.event_object_connector.res_blocks))[0]['args']
if len(args) > 0:
#has a key
self._subscribe_key_code = args[0]['code']
self.event_object_connector.queue = QueueConnectAndDispatcher(self._subscribe_key_code)
else:
#no key
self._subscribe_key_code = None
self.event_object_connector.queue = QueueConnectAndDispatcherWithoutKey()
# internal methods
def _write_inblocks_for_subscription(self, arg_set):
for block_name in arg_set.keys():
for arg_code in arg_set[block_name].keys():
if arg_set[block_name][arg_code] is not None:
self.xaquery_xareal.SetFieldData(block_name, arg_code, arg_set[block_name][arg_code])
def finalize_com_object(self):
if hasattr(self, 'xaquery_xareal'):
del(self.xaquery_xareal)
if hasattr(self.event_object_connector, 'xaquery_xareal'):
del(self.event_object_connector.xaquery_xareal)
#For now, period-limited TR codes are identified using the list announced on the website
#Later this should be fetched via an API if one becomes available - 2015-05-07
_tmp_periodLimitedTrCodes=["CCEAQ01100",
"CCEAQ06000",
"CCEAQ10100",
"CCEAQ50600",
"CCEBQ10500",
"CDPCQ04700",
"CDPCQ13900",
"CDPCQ14400",
"CEXAQ21100",
"CEXAQ21200",
"CEXAQ31100",
"CEXAQ31200",
"CEXAQ44200",
"CFOAQ00600",
"CFOAQ10100",
"CFOAQ50400",
"CFOBQ10300",
"CFOBQ10500",
"CFOBQ10800",
"CFOEQ11100",
"CFOEQ82600",
"CFOEQ82700",
"CFOFQ02400",
"CFXBQ03700",
"CFXBQ03900",
"CFXBQ07000",
"CFXBQ08400",
"CFXBQ08600",
"CFXBQ08700",
"CFXBQ08800",
"CFXBT03600",
"ChartExcel",
"ChartIndex",
"CIDBQ01400",
"CIDBQ01500",
"CIDBQ01800",
"CIDBQ02400",
"CIDBQ03000",
"CIDBQ05300",
"CIDEQ00800",
"CLNAQ00100",
"CSPAQ00600",
"CSPAQ02200",
"CSPAQ02300",
"CSPAQ03700",
"CSPAQ12200",
"CSPAQ12300",
"CSPAQ13700",
"CSPBQ00200",
"CSPBQ01300",
"f8301",
"f8307",
"f8309",
"f8311",
"FOCCQ33600",
"FOCCQ33700",
"MMDAQ91200",
"o3101",
"o3103",
"o3104",
"o3105",
"o3106",
"o3107",
"o3116",
"o3117",
"t1302",
"t1305",
"t1308",
"t1404",
"t1405",
"t1449",
"t1471",
"t1475",
"t1485",
"t1514",
"t1516",
"t1532",
"t1537",
"t1602",
"t1603",
"t1615",
"t1617",
"t1621",
"t1631",
"t1632",
"t1633",
"t1640",
"t1662",
"t1664",
"t1665",
"t1701",
"t1702",
"t1717",
"t1752",
"t1764",
"t1771",
"t1809",
"t1825",
"t1826",
"t1833",
"t1901",
"t1902",
"t1903",
"t1904",
"t1921",
"t1926",
"t1927",
"t1941",
"t1954",
"t2106",
"t2203",
"t2209",
"t2405",
"t2421",
"t2541",
"t2545",
"t2805",
"t2813",
"t2814",
"t2816",
"t2833",
"t3102",
"t3202",
"t3320",
"t3325",
"t3341",
"t3401",
"t3518",
"t3521",
"t4201",
"t8405",
"t8406",
"t8408",
"t8409",
"t8411",
"t8412",
"t8413",
"t8414",
"t8415",
"t8416",
"t8417",
"t8418",
"t8419",
"t8424",
"t8427",
"t8428",
"t8429",]
def _getTrCountPerPeriod(self, trcode):
if trcode in self._tmp_periodLimitedTrCodes:
return 200, 600 #up to 200 queries allowed per 600 seconds
#return 5, 10 #test values - up to 5 queries per 10 seconds
else:
return 0, 0
class QueueConnectAndDispatcher:
def __init__(self, key_code):
self._key_code = key_code
self._queues = dict()
self._lock = threading.Lock()
self._queue_count = 0
def register(self, queue, key):
new_key = False
with self._lock:
queue_set = self._queues.get(key)
if queue_set:
queue_set.add(queue)
else:
queue_set = {queue}
self._queues[key] = queue_set
new_key = True
self._queue_count += 1
return new_key
def unregister(self, queue, key):
with self._lock:
remove_key = False
queue_set = self._queues.get(key)
queue_set.remove(queue)
if len(queue_set) == 0:
self._queues.pop(key)
remove_key = True
self._queue_count -= 1
return remove_key
def getRegisteredQueuesCount(self):
with self._lock:
return self._queue_count
#queue part
def task_done(self):
"""지원안함"""
raise NotImplementedError('This should not be called')
def join(self):
with self._lock:
for queue in self._queues.values():
queue.join()
def qsize(self):
return 0
def empty(self):
return True
def full(self):
return False
#def put(self, item, block=True, timeout=None):
def put(self, item):
#extract the key
key = list(item.values())[0][self._key_code]
with self._lock:
queue_set = self._queues.get(key)
if queue_set:
for queue in queue_set:
queue.put(item)
def get(self, block=True, timeout=None):
"""지원안함"""
raise NotImplementedError('This should not be called')
def put_nowait(self, item):
"""지원안함"""
raise NotImplementedError('This should not be called')
def get_nowait(self):
"""지원안함"""
raise NotImplementedError('This should not be called')
class QueueConnectAndDispatcherWithoutKey:
def __init__(self):
self._queues = set()
self._lock = threading.Lock()
def register(self, queue):
with self._lock:
self._queues.add(queue)
return len(self._queues)==1
def unregister(self, queue):
with self._lock:
self._queues.remove(queue)
return len(self._queues)==0
def getRegisteredQueuesCount(self):
with self._lock:
return len(self._queues)
#dummy queue part
def task_done(self):
"""지원안함"""
raise NotImplementedError('This should not be called')
def join(self):
with self._lock:
for queue in self._queues:
queue.join()
def qsize(self):
return 0
def empty(self):
return True
def full(self):
return False
#def put(self, item, block=True, timeout=None):
def put(self, item):
with self._lock:
for queue in self._queues:
queue.put(item)
pass
def get(self, block=True, timeout=None):
"""지원안함"""
raise NotImplementedError('This should not be called')
def put_nowait(self, item):
"""지원안함"""
raise NotImplementedError('This should not be called')
def get_nowait(self):
"""지원안함"""
raise NotImplementedError('This should not be called')
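# --- Added illustration: a hedged usage sketch, not part of the original module ---
# Shows the fan-out behaviour of QueueConnectAndDispatcherWithoutKey with the
# standard-library queue; any object exposing put()/join() works as a subscriber.
def _example_dispatch_without_key():
    try:
        import queue           # Python 3
    except ImportError:
        import Queue as queue  # Python 2
    dispatcher = QueueConnectAndDispatcherWithoutKey()
    subscriber = queue.Queue()
    first = dispatcher.register(subscriber)          # True: first registered queue
    dispatcher.put({'OutBlock': {'price': '100'}})   # delivered to every registered queue
    item = subscriber.get_nowait()
    empty = dispatcher.unregister(subscriber)        # True: no queues left
    return first, item, empty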
|
mit
| 6,547,232,836,483,975,000
| 31.366959
| 138
| 0.660839
| false
| 2.479465
| false
| false
| false
|
hosseinoliabak/learningpy
|
13_51_googleApi.py
|
1
|
2968
|
'''
Calling a JSON API
In this assignment, you will write a Python program somewhat similar to
http://www.py4e.com/code3/geojson.py. The program will prompt for a location,
contact a web service and retrieve JSON for the web service and parse that data,
and retrieve the first place_id from the JSON. A place ID is a textual
identifier that uniquely identifies a place as within Google Maps.
API End Points:
To complete this assignment, you should use this API endpoint that has a static
subset of the Google Data:
http://py4e-data.dr-chuck.net/geojson?
This API uses the same parameters (sensor and address) as the Google API.
This API also has no rate limit so you can test as often as you like. If you
visit the URL with no parameters, you get a list of all of the address values
which can be used with this API.
To call the API, you need to provide the address that you are requesting as the
address= parameter, properly URL encoded using the urllib.urlencode()
function as shown in http://www.py4e.com/code3/geojson.py
Test Data / Sample Execution:
You can test to see if your program is working with a location of
"South Federal University" which will have a place_id of
"ChIJJ8oO7_B_bIcR2AlhC8nKlok".
+-------------Sample Execution-------------+
|$ python3 solution.py |
|Enter location: South Federal University |
|Retrieving http://... |
|Retrieved 2101 characters |
|Place id ChIJJ8oO7_B_bIcR2AlhC8nKlok |
+------------------------------------------+
Turn In:
Please run your program to find the place_id for this location:
+++ University of Malaya +++
Make sure to enter the name and case exactly as above and enter the
place_id and your Python code below. Hint: The first seven characters of the
place_id are "ChIJC9_ ..."
Make sure to retrieve the data from the URL specified above and not the normal
Google API. Your program should work with the Google API - but the place_id may
not match for this assignment.
'''
import urllib.request
import urllib.parse
import json
# Note that Google is increasingly requiring keys for this API
#sServiceUrl = 'http://maps.googleapis.com/maps/api/geocode/json?'
#To use googleapis uncomment the above line and comment the below line
sServiceUrl = 'http://py4e-data.dr-chuck.net/geojson?'
sAddress = input('Enter location [South Federal University]: ')
if not sAddress:
sAddress = 'South Federal University'
sUrl = sServiceUrl + urllib.parse.urlencode({'address': sAddress})
print('Retrieving', sUrl)
httpResponse = urllib.request.urlopen(sUrl)
sData = httpResponse.read().decode()
try:
dJsonData = json.loads(sData)
except:
dJsonData = None
sPlaceID = dJsonData['results'][0]['place_id']
print('place_id:', sPlaceID)
fLat = dJsonData['results'][0]['geometry']['location']['lat']
fLng = dJsonData["results"][0]["geometry"]["location"]["lng"]
print('lat', fLat, 'lng', fLng)
sLocation = dJsonData['results'][0]['formatted_address']
print(sLocation)
|
gpl-3.0
| 1,357,545,549,673,138,400
| 37.545455
| 80
| 0.723383
| false
| 3.550239
| false
| false
| false
|
mgp/sharebears
|
sharebears/url_decoder_github.py
|
1
|
5196
|
import re
import url_decoder
from url_decoder import UrlDecoder, UrlDecoderException
class _GitHubUrlDecoder(UrlDecoder):
@staticmethod
def can_decode_url(url, parsed_url):
if not parsed_url.netloc.startswith("github."):
return False
return True
class GitHubRepositoryOwnerItem:
"""The owner in a GitHubRepositoryItem."""
def __init__(self, decoded_owner):
self.login = decoded_owner["login"]
self.avatar_url = decoded_owner["avatar_url"]
self.html_url = decoded_owner["html_url"]
class GitHubRepositoryItem:
"""A GitHub repository for a RenderableItem."""
def __init__(self, decoded_url):
self.name = decoded_url["name"]
self.description = decoded_url["description"]
self.html_url = decoded_url["html_url"]
self.language = decoded_url["language"]
self.owner = GitHubRepositoryOwnerItem(decoded_url["owner"])
class GitHubRepositoryUrlDecoder(_GitHubUrlDecoder):
"""Renders a GitHub repository."""
_PATH_REGEX = re.compile("^/(?P<owner>\w+)/(?P<repo>\w+)$")
def __init__(self, github_client):
self.github_client = github_client
@staticmethod
def name():
return "github-repository"
@staticmethod
def _match_parsed_url(parsed_url):
return GitHubRepositoryUrlDecoder._PATH_REGEX.match(parsed_url.path)
@staticmethod
def can_decode_url(url, parsed_url):
if not _GitHubUrlDecoder.can_decode_url(url, parsed_url):
return False
elif not GitHubRepositoryUrlDecoder._match_parsed_url(parsed_url):
return False
return True
def _filter_json(self, json):
"""Filters the JSON from https://developer.github.com/v3/repos/#get"""
# Filter the repository owner.
owner_json = json["owner"]
filtered_owner_json = url_decoder.filter_json(owner_json,
"login", "avatar_url", "html_url")
# Filter the repository.
filtered_json = url_decoder.filter_json(json,
"name",
"description",
"html_url",
"language")
filtered_json["owner"] = filtered_owner_json
return filtered_json
def decode_url(self, url, parsed_url):
match = self._match_parsed_url(parsed_url)
if not match:
raise UrlDecoderException("URL is not decodeable: %s" % parsed_url)
owner = match.group("owner")
repo = match.group("repo")
json = self.github_client.get_repository(owner, repo)
return self._filter_json(json)
def item_for_rendering(self, decoded_url):
return GitHubRepositoryItem(decoded_url)
class GitHubCommitUserItem:
"""A user in a GitHubCommitItem."""
def __init__(self, decoded_user):
self.name = decoded_user["name"]
self.email = decoded_user["email"]
self.date = url_decoder.to_datetime(decoded_user["date"])
class GitHubCommitItem:
"""A GitHub commit for a RenderableItem."""
def __init__(self, decoded_url):
self.sha = decoded_url["sha"]
self.url = decoded_url["url"]
self.message = decoded_url["message"]
self.author = GitHubCommitUserItem(decoded_url["author"])
self.committer = GitHubCommitUserItem(decoded_url["committer"])
class GitHubCommitUrlDecoder(_GitHubUrlDecoder):
"""Renders a commit belonging to a GitHub repository."""
_PATH_REGEX = re.compile("^/(?P<owner>\w+)/(?P<repo>\w+)/commit/(?P<sha>\w+)$")
def __init__(self, github_client):
self.github_client = github_client
@staticmethod
def name():
return "github-commit"
@staticmethod
def _match_parsed_url(parsed_url):
return GitHubCommitUrlDecoder._PATH_REGEX.match(parsed_url.path)
@staticmethod
def can_decode_url(url, parsed_url):
if not _GitHubUrlDecoder.can_decode_url(url, parsed_url):
return False
elif not GitHubCommitUrlDecoder._match_parsed_url(parsed_url):
return False
return True
def _filter_json(self, json):
"""Filters the JSON from https://developer.github.com/v3/git/commits/#get-a-commit"""
return url_decoder.filter_json(json,
"sha",
"url",
"author",
"committer",
"message")
def decode_url(self, url, parsed_url):
match = self._match_parsed_url(parsed_url)
if not match:
raise UrlDecoderException("URL is not decodeable: %s" % parsed_url)
owner = match.group("owner")
repo = match.group("repo")
sha = match.group("sha")
json = self.github_client.get_commit(owner, repo, sha)
return self._filter_json(json)
def item_for_rendering(self, decoded_url):
return GitHubCommitItem(decoded_url)
class GitHubGistItem:
"""A GitHub Gist for a RenderableItem."""
def __init__(self, decoded_url):
self.url = decoded_url["url"]
class GitHubGistUrlDecoder(UrlDecoder):
"""Embeds a Gist."""
_PATH_REGEX = re.compile("^/\w+/\w+$")
@staticmethod
def name():
return "github-gist"
@staticmethod
def can_decode_url(url, parsed_url):
if not parsed_url.netloc.startswith("gist.github."):
return False
elif not GitHubGistUrlDecoder._PATH_REGEX.match(parsed_url.path):
return False
return True
def decode_url(self, url, parsed_url):
# Use an embedded Gist.
return { "url": url }
def item_for_rendering(self, decoded_url):
return GitHubGistItem(decoded_url)
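# --- Added illustration: a hedged usage sketch, not part of the original module ---
# The Gist decoder needs no API client, so it makes a small self-contained example;
# the URL is hypothetical and the no-argument constructor is assumed from UrlDecoder.
def _example_decode_gist():
    try:
        from urllib.parse import urlparse  # Python 3
    except ImportError:
        from urlparse import urlparse      # Python 2
    url = "https://gist.github.com/someuser/abc123"
    parsed_url = urlparse(url)
    if not GitHubGistUrlDecoder.can_decode_url(url, parsed_url):
        return None
    decoder = GitHubGistUrlDecoder()
    decoded = decoder.decode_url(url, parsed_url)   # {"url": url}
    return decoder.item_for_rendering(decoded)      # GitHubGistItem with a .url attribute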
|
apache-2.0
| 2,276,164,251,358,414,600
| 26.0625
| 89
| 0.672825
| false
| 3.57605
| false
| false
| false
|
zhlinh/leetcode
|
0142.Linked List Cycle II/solution.py
|
1
|
1073
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: zhlinhng@gmail.com
Version: 0.0.1
Created Time: 2016-03-17
Last_modify: 2016-03-17
******************************************
'''
'''
Given a linked list,
return the node where the cycle begins.
If there is no cycle, return null.
Note: Do not modify the linked list.
Follow up:
Can you solve it without using extra space?
'''
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
fast, slow, entry = head, head, head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
if fast == slow:
while slow != entry:
entry = entry.next
slow = slow.next
return entry
return None
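# --- Added illustration: a hedged usage sketch, not part of the original solution ---
# Build the list 1 -> 2 -> 3 -> back to 2 and confirm that node 2 is reported
# as the start of the cycle.
if __name__ == '__main__':
    n1, n2, n3 = ListNode(1), ListNode(2), ListNode(3)
    n1.next = n2
    n2.next = n3
    n3.next = n2  # close the cycle back to node 2
    entry = Solution().detectCycle(n1)
    print(entry.val if entry else None)  # expected output: 2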
|
apache-2.0
| -6,262,251,490,648,963,000
| 22.844444
| 44
| 0.50699
| false
| 3.930403
| false
| false
| false
|
harkrishan/TwitterBot
|
TwitterBot.py
|
1
|
3213
|
#!/usr/bin/python
# Importing Twython
from twython import Twython, TwythonError
# Importing Wolfram
import urllib
import wolframalpha
import urllib2
from xml.etree import ElementTree as etree
import sys
import time
#Setting variables for Twitter
app_key = "*****YOUR APP KEY******"
app_secret = "****YOUR APP SECRET*****"
oauth_token = "*****YOUR OAUTH TOKEN******"
oauth_token_secret = "*****YOUR OAUTH TOKEN SECRET*****"
#Twython Object
twitter = Twython(app_key, app_secret, oauth_token, oauth_token_secret)
#Setting variables for Wolfram
app_id = "*****YOUR WOLFRAMALPHA APP ID******"
#Wolfram Object
client = wolframalpha.Client(app_id)
#File to store the last tweet's id
file = "/twitterBot/max_id.txt"
#Function to ask Wolframalpha and return the answer
def wolfram_alpha(str):
str=str.lower()
str=str.replace("@berry_bots"," ")
str=str.replace("."," ")
str=str.replace("hi berry bots"," ")
str=str.replace("hello berry bots"," ")
str=str.replace("berry bots"," ")
str=str.replace("berry bot"," ")
str=str.strip()
str=str.encode('utf-8')
res = client.query(str)
if len(res.pods)>0:
text=" "
pod = res.pods[1]
if pod.text:
text = pod.text.encode('utf-8')
else:
text = 'x'
else:
text = 'x'
return text;
#Reading file for last tweet's id
max=open(file,"r")
max.seek(0,0)
mid=max.read(18)
search_results = None
#Searching Twitter for Questions
try:
search_results = twitter.search(q="@berry_bots",lang="en",count=5,since_id=str(mid))
except TwythonError as err:
print err
max.close()
mxd = None
#Traversing the tweets searched and tweeting back the answers
try:
if search_results:
for tweet in reversed(search_results["statuses"]):
mxd = tweet["id_str"]
answer = wolfram_alpha(tweet["text"])
if answer!='x':
reply_id = tweet["id_str"]
reply_user = '@' + tweet['user']['screen_name']
reply_user = reply_user.encode('utf-8')
if len(answer)>123:
n=123
for i in range(0, len(answer), n):
ans=answer[i:i+n]
ans = reply_user + ' ' + ans
twitter.update_status(status=ans,in_reply_to_status_id=reply_id)
time.sleep(5)
else:
answer = reply_user + ' ' + answer
twitter.update_status(status=answer,in_reply_to_status_id=reply_id)
time.sleep(5)
except TwythonError as e:
print e
#Writing the id of the last replied tweet into the file
if mxd:
max=open(file,"w")
max.write(mxd)
max.close()
|
gpl-2.0
| 5,662,756,556,183,229,000
| 29.894231
| 112
| 0.498599
| false
| 3.815914
| false
| false
| false
|
alpha-beta-soup/errorgeopy
|
tests/test_geocoders.py
|
1
|
6649
|
import os
import encodings.idna
import pytest
import yaml
import geopy
import shapely
import errorgeopy.geocoders
@pytest.fixture
def addresses():
return (
'66 Great North Road, Grey Lynn, Auckland, 1021, New Zealand',
'Grey Lynn, Auckland, 1021, New Zealand',
'High Street, Lower Hutt, New Zealand',
'10 Aurora Street, Petone, Lower Hutt, Wellington', # Doesn't produce enough for a cluster
'10 Aurora Street, Petone, Lower Hutt, 5012', # Doesn't produce enough for a cluster
'Oriental Street, Lower Hutt, New Zealand',
'Oriental Bay, Wellington, New Zealand',
'Oriental Vay, Wellington, NZ', # Deliberate typo "Vay",
'ZJ6AZ2Ixgp1or4O' # Deliberate nonsense, random string
)
@pytest.fixture
def addresses_reverse():
return (
(-37.8004971, 174.868439), # Near me!
(-41.2296258, 174.8828724), # 10 Aurora Street, Petone, Lower Hutt
(-41.1945832, 174.9403476), # High Street, Lower Hutt
(-41.2910862, 174.7882479), # Oriental Bay, Wellington
# (-90, 0) # South Pole
# (-91, 181) # Out of range
)
@pytest.fixture
def configfile():
return os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'config.yml')
@pytest.fixture
def load_configfile():
return yaml.load(open(configfile(), 'r'))
@pytest.fixture
def geocoderpool_fromfile():
return errorgeopy.geocoders.GeocoderPool.fromfile(configfile(), yaml.load)
@pytest.fixture
def geocoderpool(load=True):
if load:
return errorgeopy.geocoders.GeocoderPool(load_configfile())
else:
# Default settings (free no-sign-up APIs)
return errorgeopy.geocoders.GeocoderPool()
def test_load_gpool_from_file_with_caller():
gpool = geocoderpool_fromfile()
assert isinstance(
gpool, errorgeopy.geocoders.GeocoderPool
), 'failed to produce a GeocoderPool object on instantiation'
assert gpool.config == yaml.load(open(configfile(
), 'r')), 'configuration was mutated on instantiation'
assert getattr(gpool._geocoders, '__iter__',
False), 'GeocoderPool._geocoders is not iterable'
assert all([
issubclass(x.geocoder.__class__, geopy.geocoders.base.Geocoder)
for x in gpool.geocoders
]), 'not all of the GeocoderPool geocoders are geopy.Geocoder objects'
def test_load_gpool_from_file_without_caller():
gpool = geocoderpool()
assert isinstance(
gpool, errorgeopy.geocoders.GeocoderPool
), 'failed to produce a GeocoderPool object on instantiation'
assert gpool.config == load_configfile(
), 'configuration was mutated on instantiation'
assert getattr(gpool._geocoders, '__iter__',
False), 'GeocoderPool._geocoders is not iterable'
assert all([
issubclass(x.geocoder.__class__, geopy.geocoders.base.Geocoder)
for x in gpool.geocoders
]), 'not all of the GeocoderPool geocoders are geopy.Geocoder objects'
def test_geocoder_pool():
gpool = geocoderpool()
assert isinstance(gpool.geocoders, list)
def _generic_test_geocoderpool(gpool):
assert callable(gpool.geocode)
for test_case in addresses():
res = gpool.geocode(test_case)
assert isinstance(res, errorgeopy.location.Location)
assert isinstance(res._polygonisable(), bool)
assert all(
[isinstance(x, geopy.location.Location) for x in res.locations])
assert all([isinstance(x, str) for x in res.addresses])
assert all([isinstance(x, geopy.Point) for x in res.points])
assert isinstance(res.multipoint, shapely.geometry.MultiPoint) or (
res.multipoint is None and len(res) == 0)
assert isinstance(res.mbc, shapely.geometry.Polygon) or (
res.mbc is None and len(res) < 2)
assert isinstance(res.concave_hull, shapely.geometry.Polygon) or (
res.concave_hull is None and len(res) < 4)
assert isinstance(res.convex_hull, shapely.geometry.Polygon) or (
res.convex_hull is None and len(res) < 3)
assert isinstance(
res.centroid,
shapely.geometry.Point) or (res.centroid is None and len(res) == 0)
assert isinstance(res.clusters,
errorgeopy.location.LocationClusters) or (
res.clusters is None and len(res) == 0)
assert (res.clusters is None and len(res) == 0) or isinstance(
res.clusters.geometry_collection,
shapely.geometry.GeometryCollection)
assert (res.clusters is None and len(res) == 0) or isinstance(
res.clusters.cluster_centres, shapely.geometry.MultiPoint)
assert isinstance(
res.most_central_location, shapely.geometry.Point) or (
res.most_central_location is None and len(res) == 0)
assert res.most_central_location in res._shapely_points() or (
res.most_central_location is None and len(res) == 0)
def test_geocode():
gpool = geocoderpool()
_generic_test_geocoderpool(gpool)
def test_simple_geocode():
gpool = geocoderpool(load=False)
_generic_test_geocoderpool(gpool)
def test_reverse_geocode():
gpool = geocoderpool()
assert callable(gpool.reverse)
for test_case in addresses_reverse():
res = gpool.reverse(test_case)
assert isinstance(res, errorgeopy.address.Address)
assert len(res.addresses) <= len(res.dedupe())
assert isinstance(res.longest_common_substring(), str)
extract1 = res.extract(' '.join(str(res.addresses[0]).split()[::3]))
assert isinstance(extract1, list)
if len(extract1) > 0:
assert type(extract1[0][0]) is geopy.location.Location
assert type(extract1[0][1]) is int
assert sorted(
[e[1] for e in extract1],
reverse=True) == [e[1] for e in extract1]
extract2 = res.extract(res.extract(str(res.addresses[0])[::6]))
assert isinstance(extract2, list)
if len(extract2) > 0:
assert type(extract2[0][0]) is geopy.location.Location
assert type(extract2[0][1]) is int
assert sorted(
[e[1] for e in extract2],
reverse=True) == [e[1] for e in extract2]
with pytest.raises(NotImplementedError):
res.longest_common_sequence()
with pytest.raises(NotImplementedError):
res.regex()
with pytest.raises(NotImplementedError):
res.parse()
with pytest.raises(NotImplementedError):
res.tag()
|
mit
| 523,301,348,730,464,060
| 34.747312
| 99
| 0.638893
| false
| 3.56706
| true
| false
| false
|
onshape-public/onshape-clients
|
python/onshape_client/oas/models/card.py
|
1
|
9994
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import three_d_secure
except ImportError:
three_d_secure = sys.modules["onshape_client.oas.models.three_d_secure"]
class Card(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"account": (str,), # noqa: E501
"address_city": (str,), # noqa: E501
"address_country": (str,), # noqa: E501
"address_line1": (str,), # noqa: E501
"address_line1_check": (str,), # noqa: E501
"address_line2": (str,), # noqa: E501
"address_state": (str,), # noqa: E501
"address_zip": (str,), # noqa: E501
"address_zip_check": (str,), # noqa: E501
"available_payout_methods": ([str],), # noqa: E501
"brand": (str,), # noqa: E501
"country": (str,), # noqa: E501
"currency": (str,), # noqa: E501
"customer": (str,), # noqa: E501
"cvc_check": (str,), # noqa: E501
"default_for_currency": (bool,), # noqa: E501
"description": (str,), # noqa: E501
"dynamic_last4": (str,), # noqa: E501
"exp_month": (int,), # noqa: E501
"exp_year": (int,), # noqa: E501
"fingerprint": (str,), # noqa: E501
"funding": (str,), # noqa: E501
"id": (str,), # noqa: E501
"iin": (str,), # noqa: E501
"instance_url": (str,), # noqa: E501
"issuer": (str,), # noqa: E501
"last4": (str,), # noqa: E501
"metadata": ({str: (str,)},), # noqa: E501
"name": (str,), # noqa: E501
"object": (str,), # noqa: E501
"recipient": (str,), # noqa: E501
"status": (str,), # noqa: E501
"three_d_secure": (three_d_secure.ThreeDSecure,), # noqa: E501
"tokenization_method": (str,), # noqa: E501
"type": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"account": "account", # noqa: E501
"address_city": "addressCity", # noqa: E501
"address_country": "addressCountry", # noqa: E501
"address_line1": "addressLine1", # noqa: E501
"address_line1_check": "addressLine1Check", # noqa: E501
"address_line2": "addressLine2", # noqa: E501
"address_state": "addressState", # noqa: E501
"address_zip": "addressZip", # noqa: E501
"address_zip_check": "addressZipCheck", # noqa: E501
"available_payout_methods": "availablePayoutMethods", # noqa: E501
"brand": "brand", # noqa: E501
"country": "country", # noqa: E501
"currency": "currency", # noqa: E501
"customer": "customer", # noqa: E501
"cvc_check": "cvcCheck", # noqa: E501
"default_for_currency": "defaultForCurrency", # noqa: E501
"description": "description", # noqa: E501
"dynamic_last4": "dynamicLast4", # noqa: E501
"exp_month": "expMonth", # noqa: E501
"exp_year": "expYear", # noqa: E501
"fingerprint": "fingerprint", # noqa: E501
"funding": "funding", # noqa: E501
"id": "id", # noqa: E501
"iin": "iin", # noqa: E501
"instance_url": "instanceURL", # noqa: E501
"issuer": "issuer", # noqa: E501
"last4": "last4", # noqa: E501
"metadata": "metadata", # noqa: E501
"name": "name", # noqa: E501
"object": "object", # noqa: E501
"recipient": "recipient", # noqa: E501
"status": "status", # noqa: E501
"three_d_secure": "threeDSecure", # noqa: E501
"tokenization_method": "tokenizationMethod", # noqa: E501
"type": "type", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""card.Card - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
account (str): [optional] # noqa: E501
address_city (str): [optional] # noqa: E501
address_country (str): [optional] # noqa: E501
address_line1 (str): [optional] # noqa: E501
address_line1_check (str): [optional] # noqa: E501
address_line2 (str): [optional] # noqa: E501
address_state (str): [optional] # noqa: E501
address_zip (str): [optional] # noqa: E501
address_zip_check (str): [optional] # noqa: E501
available_payout_methods ([str]): [optional] # noqa: E501
brand (str): [optional] # noqa: E501
country (str): [optional] # noqa: E501
currency (str): [optional] # noqa: E501
customer (str): [optional] # noqa: E501
cvc_check (str): [optional] # noqa: E501
default_for_currency (bool): [optional] # noqa: E501
description (str): [optional] # noqa: E501
dynamic_last4 (str): [optional] # noqa: E501
exp_month (int): [optional] # noqa: E501
exp_year (int): [optional] # noqa: E501
fingerprint (str): [optional] # noqa: E501
funding (str): [optional] # noqa: E501
id (str): [optional] # noqa: E501
iin (str): [optional] # noqa: E501
instance_url (str): [optional] # noqa: E501
issuer (str): [optional] # noqa: E501
last4 (str): [optional] # noqa: E501
metadata ({str: (str,)}): [optional] # noqa: E501
name (str): [optional] # noqa: E501
object (str): [optional] # noqa: E501
recipient (str): [optional] # noqa: E501
status (str): [optional] # noqa: E501
three_d_secure (three_d_secure.ThreeDSecure): [optional] # noqa: E501
tokenization_method (str): [optional] # noqa: E501
type (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|
mit
| 5,214,020,824,491,918,000
| 38.976
| 82
| 0.537823
| false
| 3.787041
| true
| false
| false
|
Wajihulhassan/SelfContainedPrevirt
|
tools/occam/occam/targets/ld.py
|
1
|
7705
|
# ------------------------------------------------------------------------------
# OCCAM
#
# Copyright © 2011-2012, SRI International
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of SRI International nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from occam import toolchain
from occam import target
from occam import driver
from occam.targets import argparser
from occam.targets import par
import logging
import tempfile
import os
def ld_default_o(input_file):
return 'a.out'
def useAsm(flags):
try:
return 'assembler' in flags[flags.index('-x')+1]
except ValueError as e:
return False
class LdTool (par.ParallelTool, argparser.ArgParser):
def flags(self): return [
'demangle', 'unique',
'trace-symbol', 'aarchive', 'ashared', 'adefault', 'd', 'dc', 'dp',
'E', 'export-dynamic', 'EB', 'EL', 'f', 'i', 'memulation', 'M',
'print-map', 'n', 'nmagic', 'N', 'omagic', 'no-omagic', 'q',
'emit-relocs', 'force-dynamic', 'r', 'relocatable', 's', 'strip-all',
'S', 'strip-debug', 't', 'trace', 'Ur', 'v', 'version', 'V', 'x',
'discard-all', 'X', 'discard-locals', 'accept-unknown-input-arch',
'no-accept-unknown-input-arch', 'as-needed', 'no-as-needed',
'add-needed', 'no-add-needed', 'Bdynamic', 'dy', 'call_shared',
'Bgroup', 'Bstatic', 'dn', 'non_shared', 'static', 'Bsymbolic',
'dynamic-list-cpp-typeinfo', 'check-sections', 'no-check-sections',
'cref', 'no-define-common', 'no-demangle', 'fatal-warnings',
'force-exe-suffix', 'gc-sections', 'no-gc-sections',
'print-gc-sections', 'no-print-gc-sections', 'help', 'target-help',
'no-keep-memory', 'no-undefined', 'allow-multiple-definition',
'allow-shlib-undefined', 'no-allow-shlib-undefined',
'no-undefined-version', 'default-symver', 'default-imported-symver',
'no-warn-mismatch', 'no-whole-archive', 'noinhibit-exec', 'nostdlib',
'pie', 'pic-executable', 'qmagic', 'Qy', 'relax', 'shared',
'Bshareable', 'sort-common', 'stats', 'traditional-format',
'dll-verbose', 'verbose', 'warn-common', 'warn-constructors',
'warn-multiple-gp', 'warn-once', 'warn-section-align',
'warn-shared-textrel', 'warn-unresolved-symbols',
'error-unresolved-symbols', 'whole-archive', 'eh-frame-hdr',
'enable-new-dtags', 'disable-new-dtags', 'reduce-memory-overheads',
'add-stdcall-alias', 'dll', 'enable-stdcall-fixup',
'disable-stdcall-fixup', 'export-all-symbols', 'file-alignment',
'kill-at', 'large-address-aware', 'enable-auto-image-base',
'disable-auto-image-base', 'enable-auto-import',
'disable-auto-import', 'enable-runtime-pseudo-reloc',
'disable-runtime-pseudo-reloc', 'enable-extra-pe-debug',
'section-alignment', 'no-trampoline'
]
def shortWithOpt(self): return [
'b', 'c', 'e', 'F', 'O', 'R', 'Ttext', 'Tbss', 'Tdata',
'u', 'y', 'Y', 'm', 'z', 'o', 'A', 'h', 'G', 'T', 'dynamic-linker'
]
def longWithOpt(self): return [
'architecture', 'format', 'mri-script', 'entry', 'gpsize', 'soname',
'just-symbols', 'script', 'undefined', 'unique', 'trace-symbol',
'dynamic-list', 'demangle', 'sysroot', 'unresolved-symbols',
'version-script', 'hash-size', 'hash-style', 'auxiliary', 'filter',
'fini', 'init', 'assert', 'defsym', 'dynamic-linker', 'Map',
'oformat', 'retain-symbols-file', 'rpath', 'rpath-link',
'sort-section', 'split-by-file', 'split-by-reloc', 'section-start',
'Tbss', 'Tdata', 'Text', 'wrap', 'base-file', 'image-base',
'major-image-version', 'major-os-version', 'major-subsystem-version',
'minor-image-version', 'minor-os-version', 'minor-subsystem-version',
'output-def', 'out-implib', 'dll-search-prefix', 'subsystem',
'bank-window', 'output'
]
def opts(self, args):
return ([], args)
def occam(self, cfg, args):
tool = self.name
#(input_files, output_file, flags) = parse_ld_args(args)
(input_files, output_file, flags) = self.parse_args(args)
print "ld input files: " + ' '.join(input_files)
# TODO: this seems to have side effects, but since I'm duplicating
# stdin it shouldn't, right?
cfg.log("%(in)s\n%(out)s\n%(fl)s\n",
{ 'in' : input_files.__repr__()
, 'out' : output_file.__repr__()
, 'fl' : flags.__repr__() })
if '-' in input_files:
# num = os.dup(0)
# fd = os.fdopen(num,'r')
# cfg.log("compiling stdin\n%(msg)s", {'msg' : fd.read()})
# fd.close()
return 0 # IAM: Could also check that output is /dev/null
if '/dev/null' == output_file:
cfg.log("skipping output /dev/null", {})
return 0
if len(input_files) == 0:
return 0
elif '-Wl,--help' in flags:
# this is just help
return 0
elif '-Wl,-shared' in flags:
# LLVM doesn't do shared...
return 0
elif '-Wl,-r' in flags or '-Wl,-i' in flags or '-Wl,--relocatable' in flags:
# this is essentially linking as a library
if output_file is None:
output_file = ld_default_o(input_files[0])
retcode = toolchain.bundle(self.fixname(output_file),
map(self.fixinputname,input_files),
[x for x in flags if x.startswith('-l')],
[x[2:] for x in flags if x.startswith('-L')])
return retcode
else:
if output_file is None:
output_file = ld_default_o(input_files[0])
retcode = toolchain.link(map(self.fixinputname,input_files),
self.fixname(output_file),
flags + ['-lc'],
save='%s_main.bc' % output_file,
link=True)
return retcode
for x in ['ld']:
target.register(x, LdTool(x))
|
bsd-3-clause
| 6,475,538,266,495,347,000
| 46.850932
| 84
| 0.579439
| false
| 3.665081
| false
| false
| false
|
HiroyukiAbe/pimouse_ros
|
scripts/lightsensors.py
|
1
|
1406
|
#!/usr/bin/env python
#encoding: utf8
import sys, rospy
from pimouse_ros.msg import LightSensorValues
def get_freq():
f = rospy.get_param('lightsensors_freq',10)
try:
if f <= 0.0:
raise Exception()
except:
rospy.logerr("value error: lightsensors_freq")
sys.exit(1)
return f
if __name__ == '__main__':
devfile = '/dev/rtlightsensor0'
rospy.init_node('lightsensors')
pub = rospy.Publisher('lightsensors', LightSensorValues, queue_size=1)
freq = get_freq()
rate = rospy.Rate(freq)
while not rospy.is_shutdown():
try:
with open(devfile,'r') as f:
data = f.readline().split()
data = [ int(e) for e in data ]
d = LightSensorValues()
d.right_forward = data[0]
d.right_side = data[1]
d.left_side = data[2]
d.left_forward =data[3]
d.sum_all = sum(data)
d.sum_forward = data[0] + data[3]
pub.publish(d)
except:
rospy.logerr("cannot write to " + devfile)
f = get_freq()
if f != freq:
freq = f
rate = rospy.Rate(freq)
rate.sleep()
|
bsd-3-clause
| -61,620,430,447,069,090
| 29.565217
| 78
| 0.45377
| false
| 3.938375
| false
| false
| false
|
shadsbot/AutoCrop
|
CropImage.py
|
1
|
4681
|
from PIL import Image, ImageTk, ImageDraw


class Crop:
    """Object that contains the tools to manipulate a spritesheet"""

    def __init__(self, file="example.png", cropSize=[64, 64], padding=0, offset=[0, 0], direction="Both", numberCrops=0, useUserCrops=False):
        self.direction = direction
        self.offset = {"x": offset[0], "y": offset[1]}
        self.padding = padding
        self.cropSize = {"x": cropSize[0], "y": cropSize[1]}
        self.numberCrops = numberCrops
        self.useUserCrops = useUserCrops
        try:
            self.image = Image.open(file)
            self.image.load()
        except:
            # Fall back to a small placeholder image when the file cannot be opened.
            self.image = Image.new('RGB', (160, 60), color='red')
            self.imagedraw = ImageDraw.Draw(self.image)
            self.imagedraw.text((10, 10), "No image selected", fill=(0, 0, 0))

    def setImage(self, file):
        try:
            self.image = Image.open(file)
            self.image.load()
        except:
            self.image = Image.new('RGB', (160, 60), color='red')
            self.imagedraw = ImageDraw.Draw(self.image)
            self.imagedraw.text((10, 10), "Image not found", fill=(0, 0, 0))

    def setDirection(self, direction):
        # Drops the trailing two characters of the value passed in by the UI.
        self.direction = direction[:-2]

    def setPadding(self, pad):
        self.padding = pad

    def setUserCrops(self, userCrops, number=0):
        if (userCrops == "True"):
            userCrops = True
        else:
            userCrops = False
        self.numberCrops = number
        self.useUserCrops = userCrops

    def setSize(self, x, y):
        self.cropSize = {"x": x, "y": y}

    def setOffset(self, x, y):
        self.offset = {"x": x, "y": y}

    def horizontalLoops(self):
        # Count how many full crops fit across the image width.
        if self.useUserCrops:
            return self.numberCrops
        horizontalCrops = 0
        index = self.offset["x"]
        while (index < self.image.size[0]):
            index = index + self.cropSize["x"] + self.padding
            if (index <= self.image.size[0]):
                horizontalCrops = horizontalCrops + 1
        return horizontalCrops

    def verticalLoops(self):
        # Count how many full crops fit down the image height.
        if self.useUserCrops:
            return self.numberCrops
        verticalCrops = 0
        index = self.offset["y"]
        while (index < self.image.size[1]):
            index = index + self.cropSize["y"] + self.padding
            if (index <= self.image.size[1]):
                verticalCrops = verticalCrops + 1
        return verticalCrops

    def crop(self):
        if self.direction == "Both":
            for x in range(0, self.verticalLoops()):
                currentYLoc = self.offset["y"] + (x * (self.cropSize["y"] + self.padding))
                row = str(x) + "-"
                self.cropHorizontally(currentYLoc, row)
        elif self.direction == "Vertically":
            self.cropVertically()
        elif self.direction == "Horizontally":
            self.cropHorizontally()

    def cropHorizontally(self, currentYLoc=0, name=""):
        if (currentYLoc == 0):
            currentYLoc = self.offset["y"]
        try:
            for x in range(0, self.horizontalLoops()):
                xposition = self.offset["x"] + (x * (self.cropSize["x"] + self.padding))
                copy = self.image.crop((xposition, currentYLoc, xposition + self.cropSize["x"], currentYLoc + self.cropSize["y"]))
                copy.save("%s%s.png" % (name, x))
            return True
        except:
            print("An error occurred during the cropHorizontally routine.")
            return False

    def cropVertically(self):
        try:
            for x in range(0, self.verticalLoops()):
                yposition = self.offset["y"] + (x * (self.cropSize["y"] + self.padding))
                copy = self.image.crop((self.offset["x"], yposition, self.offset["x"] + self.cropSize["x"], yposition + self.cropSize["y"]))
                copy.save("%s.png" % x)
            return True
        except:
            print("An error occurred during the cropVertically routine.")
            return False

    def generatePreview(self):
        # Draw the crop grid on a copy of the image so the user can check it.
        try:
            copy = self.image.copy()
            tmp = ImageDraw.Draw(copy)
            if (self.direction == "Both"):
                for x in range(0, self.verticalLoops()):
                    currentYLoc = self.offset["y"] + (x * (self.cropSize["y"] + self.padding))
                    for y in range(0, self.horizontalLoops()):
                        xposition = self.offset["x"] + (y * (self.cropSize["x"] + self.padding))
                        tmp.rectangle((xposition, currentYLoc, xposition + self.cropSize["x"], currentYLoc + self.cropSize["y"]), outline='red')
            if (self.direction == "Horizontally"):
                for x in range(0, self.horizontalLoops()):
                    xposition = self.offset["x"] + (x * (self.cropSize["x"] + self.padding))
                    tmp.rectangle((xposition, self.offset["y"], xposition + self.cropSize["x"], self.offset["y"] + self.cropSize["y"]), outline='red')
            if (self.direction == "Vertically"):
                for x in range(0, self.verticalLoops()):
                    currentYLoc = self.offset["y"] + (x * (self.cropSize["y"] + self.padding))
                    xposition = self.offset["x"]
                    tmp.rectangle((xposition, currentYLoc, xposition + self.cropSize["x"], currentYLoc + self.cropSize["y"]), outline='red')
            return copy
        except:
            return False

    def debug(self):
        print(self.direction)
        print(self.offset)
        print(self.padding)
        print(self.cropSize)
        print(self.numberCrops)
        print(self.useUserCrops)
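# Usage sketch (illustrative; not from the original module). "sheet.png"
# and the crop parameters are placeholders; generatePreview() returns a
# PIL image (or False on error) and crop() saves numbered .png files to
# the current directory, as implemented above.
if __name__ == '__main__':
    cropper = Crop(file="sheet.png", cropSize=[32, 32], padding=2,
                   offset=[0, 0], direction="Both")
    preview = cropper.generatePreview()
    if preview:
        preview.save("preview.png")  # inspect the red grid before cropping
    cropper.crop()                   # writes files such as 0-0.png, 0-1.png, ...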
|
mit
| -8,013,426,775,849,068,000
| 33.681481
| 136
| 0.660543
| false
| 2.877074
| false
| false
| false
|
wdv4758h/flake8
|
flake8/engine.py
|
1
|
11434
|
# -*- coding: utf-8 -*-
import errno
import io
import platform
import re
import sys
import warnings
import pep8
from flake8 import __version__
from flake8 import callbacks
from flake8.reporter import (multiprocessing, BaseQReport, FileQReport,
QueueReport)
from flake8 import util
_flake8_noqa = re.compile(r'\s*# flake8[:=]\s*noqa', re.I).search
EXTRA_EXCLUDE = ['.tox', '.eggs', '*.egg']
pep8.PROJECT_CONFIG += ('.flake8',)
def _load_entry_point(entry_point, verify_requirements):
"""Based on the version of setuptools load an entry-point correctly.
setuptools 11.3 deprecated `require=False` in the call to EntryPoint.load.
To load entry points correctly after that without requiring all
dependencies be present, the proper way is to call EntryPoint.resolve.
This function will provide backwards compatibility for older versions of
setuptools while also ensuring we do the right thing for the future.
"""
if hasattr(entry_point, 'resolve') and hasattr(entry_point, 'require'):
if verify_requirements:
entry_point.require()
plugin = entry_point.resolve()
else:
plugin = entry_point.load(require=verify_requirements)
return plugin
def _register_extensions():
"""Register all the extensions."""
extensions = util.OrderedSet()
extensions.add(('pep8', pep8.__version__))
parser_hooks = []
options_hooks = []
ignored_hooks = []
try:
from pkg_resources import iter_entry_points
except ImportError:
pass
else:
for entry in iter_entry_points('flake8.extension'):
# Do not verify that the requirements versions are valid
checker = _load_entry_point(entry, verify_requirements=False)
pep8.register_check(checker, codes=[entry.name])
extensions.add((checker.name, checker.version))
if hasattr(checker, 'add_options'):
parser_hooks.append(checker.add_options)
if hasattr(checker, 'parse_options'):
options_hooks.append(checker.parse_options)
if getattr(checker, 'off_by_default', False) is True:
ignored_hooks.append(entry.name)
return extensions, parser_hooks, options_hooks, ignored_hooks
def get_parser():
"""This returns an instance of optparse.OptionParser with all the
extensions registered and options set. This wraps ``pep8.get_parser``.
"""
(extensions, parser_hooks, options_hooks, ignored) = _register_extensions()
details = ', '.join('%s: %s' % ext for ext in extensions)
python_version = get_python_version()
parser = pep8.get_parser('flake8', '%s (%s) %s' % (
__version__, details, python_version
))
for opt in ('--repeat', '--testsuite', '--doctest'):
try:
parser.remove_option(opt)
except ValueError:
pass
if multiprocessing:
parser.config_options.append('jobs')
parser.add_option('-j', '--jobs', type='string', default='auto',
help="number of jobs to run simultaneously, "
"or 'auto'. This is ignored on Windows.")
parser.add_option('--exit-zero', action='store_true',
help="exit with code 0 even if there are errors")
for parser_hook in parser_hooks:
parser_hook(parser)
# See comment above regarding why this has to be a callback.
parser.add_option('--install-hook', default=False, dest='install_hook',
help='Install the appropriate hook for this '
'repository.', action='callback',
callback=callbacks.install_vcs_hook)
parser.add_option('--output-file', default=None,
help='Redirect report to a file.',
type='string', nargs=1, action='callback',
callback=callbacks.redirect_stdout)
parser.add_option('--enable-extensions', default='',
dest='enabled_extensions',
help='Enable plugins and extensions that are disabled '
'by default',
type='string')
parser.ignored_extensions = ignored
return parser, options_hooks
class NoQAStyleGuide(pep8.StyleGuide):
def input_file(self, filename, lines=None, expected=None, line_offset=0):
"""Run all checks on a Python source file."""
if self.options.verbose:
print('checking %s' % filename)
fchecker = self.checker_class(
filename, lines=lines, options=self.options)
# Any "flake8: noqa" comments to ignore the entire file?
if any(_flake8_noqa(line) for line in fchecker.lines):
return 0
return fchecker.check_all(expected=expected, line_offset=line_offset)
class StyleGuide(object):
"""A wrapper StyleGuide object for Flake8 usage.
This allows for OSErrors to be caught in the styleguide and special logic
to be used to handle those errors.
"""
# Reasoning for error numbers is in-line below
serial_retry_errors = set([
# ENOSPC: Added by sigmavirus24
# > On some operating systems (OSX), multiprocessing may cause an
# > ENOSPC error while trying to create a Semaphore.
# > In those cases, we should replace the customized Queue Report
# > class with pep8's StandardReport class to ensure users don't run
# > into this problem.
# > (See also: https://gitlab.com/pycqa/flake8/issues/74)
errno.ENOSPC,
# NOTE(sigmavirus24): When adding to this list, include the reasoning
# on the lines before the error code and always append your error
# code. Further, please always add a trailing `,` to reduce the visual
# noise in diffs.
])
def __init__(self, **kwargs):
# This allows us to inject a mocked StyleGuide in the tests.
self._styleguide = kwargs.pop('styleguide', NoQAStyleGuide(**kwargs))
@property
def options(self):
return self._styleguide.options
@property
def paths(self):
return self._styleguide.paths
def _retry_serial(self, func, *args, **kwargs):
"""This will retry the passed function in serial if necessary.
In the event that we encounter an OSError with an errno in
:attr:`serial_retry_errors`, this function will retry this function
using pep8's default Report class which operates in serial.
"""
try:
return func(*args, **kwargs)
except OSError as oserr:
if oserr.errno in self.serial_retry_errors:
self.init_report(pep8.StandardReport)
else:
raise
return func(*args, **kwargs)
def check_files(self, paths=None):
return self._retry_serial(self._styleguide.check_files, paths=paths)
def excluded(self, filename, parent=None):
return self._styleguide.excluded(filename, parent=parent)
def init_report(self, reporter=None):
return self._styleguide.init_report(reporter)
def input_file(self, filename, lines=None, expected=None, line_offset=0):
return self._retry_serial(
self._styleguide.input_file,
filename=filename,
lines=lines,
expected=expected,
line_offset=line_offset,
)
def _parse_multi_options(options, split_token=','):
r"""Split and strip and discard empties.
Turns the following:
A,
B,
into ["A", "B"].
Credit: Kristian Glass as contributed to pep8
"""
if options:
return [o.strip() for o in options.split(split_token) if o.strip()]
else:
return options
def _disable_extensions(parser, options):
ignored_extensions = set(getattr(parser, 'ignored_extensions', []))
enabled = set(_parse_multi_options(options.enabled_extensions))
# Remove any of the selected extensions from the extensions ignored by
# default.
ignored_extensions -= enabled
# Whatever is left afterwards should be unioned with options.ignore and
# options.ignore should be updated with that.
options.ignore = tuple(ignored_extensions.union(options.ignore))
def get_style_guide(**kwargs):
"""Parse the options and configure the checker. This returns a sub-class
of ``pep8.StyleGuide``."""
kwargs['parser'], options_hooks = get_parser()
styleguide = StyleGuide(**kwargs)
options = styleguide.options
_disable_extensions(kwargs['parser'], options)
if options.exclude and not isinstance(options.exclude, list):
options.exclude = pep8.normalize_paths(options.exclude)
elif not options.exclude:
options.exclude = []
# Add patterns in EXTRA_EXCLUDE to the list of excluded patterns
options.exclude.extend(pep8.normalize_paths(EXTRA_EXCLUDE))
for options_hook in options_hooks:
options_hook(options)
if util.warn_when_using_jobs(options):
if not multiprocessing:
warnings.warn("The multiprocessing module is not available. "
"Ignoring --jobs arguments.")
if util.is_windows():
warnings.warn("The --jobs option is not available on Windows. "
"Ignoring --jobs arguments.")
if util.is_using_stdin(styleguide.paths):
warnings.warn("The --jobs option is not compatible with supplying "
"input using - . Ignoring --jobs arguments.")
if options.diff:
warnings.warn("The --diff option was specified with --jobs but "
"they are not compatible. Ignoring --jobs arguments."
)
if options.diff:
options.jobs = None
force_disable_jobs = util.force_disable_jobs(styleguide)
if multiprocessing and options.jobs and not force_disable_jobs:
if options.jobs.isdigit():
n_jobs = int(options.jobs)
else:
try:
n_jobs = multiprocessing.cpu_count()
except NotImplementedError:
n_jobs = 1
if n_jobs > 1:
options.jobs = n_jobs
reporter = QueueReport
if options.quiet:
reporter = BaseQReport
if options.quiet == 1:
reporter = FileQReport
report = styleguide.init_report(reporter)
report.input_file = styleguide.input_file
styleguide.runner = report.task_queue.put
return styleguide
def get_python_version():
# The implementation isn't all that important.
try:
impl = platform.python_implementation() + " "
except AttributeError: # Python 2.5
impl = ''
return '%s%s on %s' % (impl, platform.python_version(), platform.system())
def make_stdin_get_value(original):
def stdin_get_value():
if not hasattr(stdin_get_value, 'cached_stdin'):
value = original()
if sys.version_info < (3, 0):
stdin = io.BytesIO(value)
else:
stdin = io.StringIO(value)
stdin_get_value.cached_stdin = stdin
else:
stdin = stdin_get_value.cached_stdin
return stdin.getvalue()
return stdin_get_value
pep8.stdin_get_value = make_stdin_get_value(pep8.stdin_get_value)
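# Usage sketch (illustrative; not from the original module): how this
# engine was typically driven programmatically in flake8 2.x -- build a
# StyleGuide with the registered extensions and run it over a list of
# files. The file name is a placeholder; report.total_errors comes from
# pep8's report API.
if __name__ == '__main__':
    flake8_style = get_style_guide()
    report = flake8_style.check_files(['example_module.py'])
    print('total errors: %d' % report.total_errors)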
|
mit
| 5,101,505,648,745,265,000
| 35.298413
| 79
| 0.620255
| false
| 4.255303
| false
| false
| false
|
hughsaunders/keystone
|
keystone/common/ldap/core.py
|
1
|
71345
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import codecs
import functools
import os.path
import re
import sys
import weakref
import ldap
import ldap.filter
import ldappool
import six
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LW
from keystone.openstack.common import log
LOG = log.getLogger(__name__)
LDAP_VALUES = {'TRUE': True, 'FALSE': False}
CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL,
'sub': ldap.SCOPE_SUBTREE}
LDAP_DEREF = {'always': ldap.DEREF_ALWAYS,
'default': None,
'finding': ldap.DEREF_FINDING,
'never': ldap.DEREF_NEVER,
'searching': ldap.DEREF_SEARCHING}
LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER,
'demand': ldap.OPT_X_TLS_DEMAND,
'allow': ldap.OPT_X_TLS_ALLOW}
# RFC 4511 (The LDAP Protocol) defines a list containing only the OID '1.1' to
# indicate that no attributes should be returned besides the DN.
DN_ONLY = ['1.1']
_utf8_encoder = codecs.getencoder('utf-8')
def utf8_encode(value):
"""Encode a basestring to UTF-8.
If the string is unicode, encode it to UTF-8; if the string is
str, assume it's already encoded. Otherwise raise a TypeError.
:param value: A basestring
:returns: UTF-8 encoded version of value
:raises: TypeError if value is not basestring
"""
if isinstance(value, six.text_type):
return _utf8_encoder(value)[0]
elif isinstance(value, six.binary_type):
return value
else:
raise TypeError("value must be basestring, "
"not %s" % value.__class__.__name__)
_utf8_decoder = codecs.getdecoder('utf-8')
def utf8_decode(value):
"""Decode a from UTF-8 into unicode.
If the value is a binary string assume it's UTF-8 encoded and decode
it into a unicode string. Otherwise convert the value from its
type into a unicode string.
:param value: value to be returned as unicode
:returns: value as unicode
:raises: UnicodeDecodeError for invalid UTF-8 encoding
"""
if isinstance(value, six.binary_type):
return _utf8_decoder(value)[0]
return six.text_type(value)
def py2ldap(val):
"""Type convert a Python value to a type accepted by LDAP (unicode).
The LDAP API only accepts strings for values therefore convert
the value's type to a unicode string. A subsequent type conversion
will encode the unicode as UTF-8 as required by the python-ldap API,
but for now we just want a string representation of the value.
:param val: The value to convert to a LDAP string representation
:returns: unicode string representation of value.
"""
if isinstance(val, bool):
return u'TRUE' if val else u'FALSE'
else:
return six.text_type(val)
def ldap2py(val):
"""Convert an LDAP formatted value to Python type used by OpenStack.
Virtually all LDAP values are stored as UTF-8 encoded strings.
OpenStack prefers values which are Python types, e.g. unicode,
boolean, integer, etc.
:param val: LDAP formatted value
:returns: val converted to preferred Python type
"""
try:
return LDAP_VALUES[val]
except KeyError:
pass
try:
return int(val)
except ValueError:
pass
return utf8_decode(val)
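# Illustrative note (not from the original module): expected behaviour
# of the two helpers above.
#   py2ldap(True)       -> u'TRUE'        ldap2py('TRUE')         -> True
#   py2ldap(42)         -> u'42'          ldap2py('42')           -> 42
#   py2ldap(u'caf\xe9') -> u'caf\xe9'     ldap2py('caf\xc3\xa9')  -> u'caf\xe9'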
def convert_ldap_result(ldap_result):
"""Convert LDAP search result to Python types used by OpenStack.
Each result tuple is of the form (dn, attrs), where dn is a string
containing the DN (distinguished name) of the entry, and attrs is
a dictionary containing the attributes associated with the
entry. The keys of attrs are strings, and the associated values
are lists of strings.
OpenStack wants to use Python types of its choosing. Strings will
be unicode, truth values boolean, whole numbers int's, etc. DN's will
also be decoded from UTF-8 to unicode.
:param ldap_result: LDAP search result
:returns: list of 2-tuples containing (dn, attrs) where dn is unicode
and attrs is a dict whose values are type converted to
OpenStack preferred types.
"""
py_result = []
at_least_one_referral = False
for dn, attrs in ldap_result:
ldap_attrs = {}
if dn is None:
# this is a Referral object, rather than an Entry object
at_least_one_referral = True
continue
for kind, values in six.iteritems(attrs):
try:
ldap_attrs[kind] = [ldap2py(x) for x in values]
except UnicodeDecodeError:
LOG.debug('Unable to decode value for attribute %s', kind)
py_result.append((utf8_decode(dn), ldap_attrs))
if at_least_one_referral:
LOG.debug(('Referrals were returned and ignored. Enable referral '
'chasing in keystone.conf via [ldap] chase_referrals'))
return py_result
def safe_iter(attrs):
if attrs is None:
return
elif isinstance(attrs, list):
for e in attrs:
yield e
else:
yield attrs
def parse_deref(opt):
try:
return LDAP_DEREF[opt]
except KeyError:
raise ValueError(_('Invalid LDAP deref option: %(option)s. '
'Choose one of: %(options)s') %
{'option': opt,
'options': ', '.join(LDAP_DEREF.keys()), })
def parse_tls_cert(opt):
try:
return LDAP_TLS_CERTS[opt]
except KeyError:
raise ValueError(_(
'Invalid LDAP TLS certs option: %(option)s. '
'Choose one of: %(options)s') % {
'option': opt,
'options': ', '.join(LDAP_TLS_CERTS.keys())})
def ldap_scope(scope):
try:
return LDAP_SCOPES[scope]
except KeyError:
raise ValueError(
_('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {
'scope': scope,
'options': ', '.join(LDAP_SCOPES.keys())})
def prep_case_insensitive(value):
"""Prepare a string for case-insensitive comparison.
This is defined in RFC4518. For simplicity, all this function does is
lowercase all the characters, strip leading and trailing whitespace,
and compress sequences of spaces to a single space.
"""
value = re.sub(r'\s+', ' ', value.strip().lower())
return value
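# Illustrative note (not from the original module):
#   prep_case_insensitive('  Foo   BAR ') -> 'foo bar'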
def is_ava_value_equal(attribute_type, val1, val2):
"""Returns True if and only if the AVAs are equal.
When comparing AVAs, the equality matching rule for the attribute type
should be taken into consideration. For simplicity, this implementation
does a case-insensitive comparison.
Note that this function uses prep_case_insensitive so the limitations of
that function apply here.
"""
return prep_case_insensitive(val1) == prep_case_insensitive(val2)
def is_rdn_equal(rdn1, rdn2):
"""Returns True if and only if the RDNs are equal.
* RDNs must have the same number of AVAs.
* Each AVA of the RDNs must be the equal for the same attribute type. The
order isn't significant. Note that an attribute type will only be in one
AVA in an RDN, otherwise the DN wouldn't be valid.
* Attribute types aren't case sensitive. Note that attribute type
comparison is more complicated than implemented. This function only
compares case-insensitively. The code should handle multiple names for an
attribute type (e.g., cn, commonName, and 2.5.4.3 are the same).
Note that this function uses is_ava_value_equal to compare AVAs so the
limitations of that function apply here.
"""
if len(rdn1) != len(rdn2):
return False
for attr_type_1, val1, dummy in rdn1:
found = False
for attr_type_2, val2, dummy in rdn2:
if attr_type_1.lower() != attr_type_2.lower():
continue
found = True
if not is_ava_value_equal(attr_type_1, val1, val2):
return False
break
if not found:
return False
return True
def is_dn_equal(dn1, dn2):
"""Returns True if and only if the DNs are equal.
Two DNs are equal if they've got the same number of RDNs and if the RDNs
are the same at each position. See RFC4517.
Note that this function uses is_rdn_equal to compare RDNs so the
limitations of that function apply here.
:param dn1: Either a string DN or a DN parsed by ldap.dn.str2dn.
:param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
if not isinstance(dn1, list):
dn1 = ldap.dn.str2dn(utf8_encode(dn1))
if not isinstance(dn2, list):
dn2 = ldap.dn.str2dn(utf8_encode(dn2))
if len(dn1) != len(dn2):
return False
for rdn1, rdn2 in zip(dn1, dn2):
if not is_rdn_equal(rdn1, rdn2):
return False
return True
def dn_startswith(descendant_dn, dn):
"""Returns True if and only if the descendant_dn is under the dn.
:param descendant_dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
:param dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
if not isinstance(descendant_dn, list):
descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn))
if not isinstance(dn, list):
dn = ldap.dn.str2dn(utf8_encode(dn))
if len(descendant_dn) <= len(dn):
return False
# Use the last len(dn) RDNs.
return is_dn_equal(descendant_dn[-len(dn):], dn)
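# Illustrative note (not from the original module): behaviour of the DN
# helpers above, with example values.
#   is_dn_equal('cn=Foo,dc=example,dc=com', 'CN=foo,dc=Example,dc=com') -> True
#   dn_startswith('cn=foo,ou=users,dc=example,dc=com',
#                 'ou=users,dc=example,dc=com') -> True
#   dn_startswith('ou=users,dc=example,dc=com',
#                 'ou=users,dc=example,dc=com') -> False  (equal, not a descendant)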
@six.add_metaclass(abc.ABCMeta)
class LDAPHandler(object):
'''Abstract class which defines methods for an LDAP API provider.
Native Keystone values cannot be passed directly into and from the
python-ldap API. Type conversion must occur at the LDAP API
boundary, examples of type conversions are:
* booleans map to the strings 'TRUE' and 'FALSE'
* integer values map to their string representation.
* unicode strings are encoded in UTF-8
In addition to handling type conversions at the API boundary we
have the requirement to support more than one LDAP API
provider. Currently we have:
* python-ldap, this is the standard LDAP API for Python, it
requires access to a live LDAP server.
* Fake LDAP which emulates python-ldap. This is used for
testing without requiring a live LDAP server.
To support these requirements we need a layer that performs type
conversions and then calls another LDAP API which is configurable
(e.g. either python-ldap or the fake emulation).
We have an additional constraint at the time of this writing due to
limitations in the logging module. The logging module is not
capable of accepting UTF-8 encoded strings, it will throw an
encoding exception. Therefore all logging MUST be performed prior
to UTF-8 conversion. This means no logging can be performed in the
ldap APIs that implement the python-ldap API because those APIs
are defined to accept only UTF-8 strings. Thus the layer which
performs type conversions must also do the logging. We do the type
conversions in two steps, once to convert all Python types to
unicode strings, then log, then convert the unicode strings to
UTF-8.
There are a variety of ways one could accomplish this, we elect to
use a chaining technique whereby instances of this class simply
call the next member in the chain via the "conn" attribute. The
chain is constructed by passing in an existing instance of this
class as the conn attribute when the class is instantiated.
Here is a brief explanation of why other possible approaches were
not used:
subclassing
To perform the wrapping operations in the correct order
the type conversion class would have to subclass each of
the API providers. This is awkward, doubles the number of
classes, and does not scale well. It requires the type
conversion class to be aware of all possible API
providers.
decorators
Decorators provide an elegant solution to wrap methods and
would be an ideal way to perform type conversions before
calling the wrapped function and then converting the
values returned from the wrapped function. However
decorators need to be aware of the method signature, it
has to know what input parameters need conversion and how
to convert the result. For an API like python-ldap which
has a large number of different method signatures it would
require a large number of specialized
decorators. Experience has shown it's very easy to apply
the wrong decorator due to the inherent complexity and
tendency to cut-n-paste code. Another option is to
parameterize the decorator to make it "smart". Experience
has shown such decorators become insanely complicated and
difficult to understand and debug. Also decorators tend to
hide what's really going on when a method is called, the
operations being performed are not visible when looking at
the implementation of a decorated method, this too experience
has shown leads to mistakes.
Chaining simplifies both wrapping to perform type conversion as
well as the substitution of alternative API providers. One simply
creates a new instance of the API interface and insert it at the
front of the chain. Type conversions are explicit and obvious.
If a new method needs to be added to the API interface one adds it
to the abstract class definition. Should one miss adding the new
method to any derivations of the abstract class the code will fail
to load and run making it impossible to forget updating all the
derived classes.
'''
@abc.abstractmethod
def __init__(self, conn=None):
self.conn = conn
@abc.abstractmethod
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def set_option(self, option, invalue):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_option(self, option):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def unbind_s(self):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def add_s(self, dn, modlist):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def modify_s(self, dn, modlist):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_s(self, dn):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
raise exception.NotImplemented() # pragma: no cover
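# Illustrative note (not from the original module): the chaining
# described in the class docstring above is what BaseLdap.get_connection()
# (further down in this module) builds in practice, roughly:
#   conn = KeystoneLDAPHandler(conn=PythonLDAPHandler())
#   conn.connect(url, ...)
# The type-converting/logging layer simply forwards each call to the
# next handler in the chain via its `conn` attribute.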
class PythonLDAPHandler(LDAPHandler):
'''Implementation of the LDAPHandler interface which calls the
python-ldap API.
Note, the python-ldap API requires all string values to be UTF-8
encoded. The KeystoneLDAPHandler enforces this prior to invoking
the methods in this class.
'''
def __init__(self, conn=None):
super(PythonLDAPHandler, self).__init__(conn=conn)
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
_common_ldap_initialization(url=url,
use_tls=use_tls,
tls_cacertfile=tls_cacertfile,
tls_cacertdir=tls_cacertdir,
tls_req_cert=tls_req_cert,
debug_level=debug_level)
self.conn = ldap.initialize(url)
self.conn.protocol_version = ldap.VERSION3
if alias_dereferencing is not None:
self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing)
self.page_size = page_size
if use_tls:
self.conn.start_tls_s()
if chase_referrals is not None:
self.conn.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
def set_option(self, option, invalue):
return self.conn.set_option(option, invalue)
def get_option(self, option):
return self.conn.get_option(option)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
return self.conn.simple_bind_s(who, cred, serverctrls, clientctrls)
def unbind_s(self):
return self.conn.unbind_s()
def add_s(self, dn, modlist):
return self.conn.add_s(dn, modlist)
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
return self.conn.search_s(base, scope, filterstr,
attrlist, attrsonly)
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
return self.conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
# The resp_ctrl_classes parameter is a recent addition to the
# API. It defaults to None. We do not anticipate using it.
# To run with older versions of python-ldap we do not pass it.
return self.conn.result3(msgid, all, timeout)
def modify_s(self, dn, modlist):
return self.conn.modify_s(dn, modlist)
def delete_s(self, dn):
return self.conn.delete_s(dn)
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
return self.conn.delete_ext_s(dn, serverctrls, clientctrls)
def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
tls_cacertdir=None, tls_req_cert=None,
debug_level=None):
'''Method for common ldap initialization between PythonLDAPHandler and
PooledLDAPHandler.
'''
LOG.debug("LDAP init: url=%s", url)
LOG.debug('LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s '
'tls_req_cert=%s tls_avail=%s',
use_tls, tls_cacertfile, tls_cacertdir,
tls_req_cert, ldap.TLS_AVAIL)
if debug_level is not None:
ldap.set_option(ldap.OPT_DEBUG_LEVEL, debug_level)
using_ldaps = url.lower().startswith("ldaps")
if use_tls and using_ldaps:
raise AssertionError(_('Invalid TLS / LDAPS combination'))
# The certificate trust options apply for both LDAPS and TLS.
if use_tls or using_ldaps:
if not ldap.TLS_AVAIL:
raise ValueError(_('Invalid LDAP TLS_AVAIL option: %s. TLS '
'not available') % ldap.TLS_AVAIL)
if tls_cacertfile:
# NOTE(topol)
# python ldap TLS does not verify CACERTFILE or CACERTDIR
# so we add some extra simple sanity check verification
# Also, setting these values globally (i.e. on the ldap object)
# works but these values are ignored when setting them on the
# connection
if not os.path.isfile(tls_cacertfile):
raise IOError(_("tls_cacertfile %s not found "
"or is not a file") %
tls_cacertfile)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
elif tls_cacertdir:
# NOTE(topol)
# python ldap TLS does not verify CACERTFILE or CACERTDIR
# so we add some extra simple sanity check verification
# Also, setting these values globally (i.e. on the ldap object)
# works but these values are ignored when setting them on the
# connection
if not os.path.isdir(tls_cacertdir):
raise IOError(_("tls_cacertdir %s not found "
"or is not a directory") %
tls_cacertdir)
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
if tls_req_cert in LDAP_TLS_CERTS.values():
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
else:
LOG.debug("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s",
tls_req_cert)
class MsgId(list):
'''Wrapper class to hold connection and msgid.'''
pass
def use_conn_pool(func):
'''Use this only for connection pool specific ldap API.
This adds a connection object to the decorated API as the next argument after self.
'''
def wrapper(self, *args, **kwargs):
# assert isinstance(self, PooledLDAPHandler)
with self._get_pool_connection() as conn:
self._apply_options(conn)
return func(self, conn, *args, **kwargs)
return wrapper
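# Illustrative note (not from the original module): with this decorator
# the pooled handler keeps the plain handler's public signature -- a call
# such as handler.add_s(dn, modlist) executes add_s(self, conn, dn,
# modlist) with `conn` checked out of the pool for the duration of the
# call and released when the `with` block exits.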
class PooledLDAPHandler(LDAPHandler):
'''Implementation of the LDAPHandler interface which uses a pooled
connection manager.
Pool specific configuration is defined in [ldap] section.
All other LDAP configuration is still used from [ldap] section
Keystone LDAP authentication logic authenticates an end user using its DN
and password via LDAP bind to establish that the supplied password is correct.
This can fill up the pool quickly (as pool re-uses existing connection
based on its bind data) and would not leave space in pool for connection
re-use for other LDAP operations.
Now a separate pool can be established for those requests when related flag
'use_auth_pool' is enabled. That pool can have its own size and
connection lifetime. Other pool attributes are shared between those pools.
If 'use_pool' is disabled, then 'use_auth_pool' does not matter.
If 'use_auth_pool' is not enabled, then connection pooling is not used for
those LDAP operations.
Note, the python-ldap API requires all string values to be UTF-8
encoded. The KeystoneLDAPHandler enforces this prior to invoking
the methods in this class.
'''
# Added here to allow override for testing
Connector = ldappool.StateConnector
auth_pool_prefix = 'auth_pool_'
connection_pools = {} # static connector pool dict
def __init__(self, conn=None, use_auth_pool=False):
super(PooledLDAPHandler, self).__init__(conn=conn)
self.who = ''
self.cred = ''
self.conn_options = {} # connection specific options
self.page_size = None
self.use_auth_pool = use_auth_pool
self.conn_pool = None
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
_common_ldap_initialization(url=url,
use_tls=use_tls,
tls_cacertfile=tls_cacertfile,
tls_cacertdir=tls_cacertdir,
tls_req_cert=tls_req_cert,
debug_level=debug_level)
self.page_size = page_size
# Following two options are not added in common initialization as they
# need to follow a sequence in PythonLDAPHandler code.
if alias_dereferencing is not None:
self.set_option(ldap.OPT_DEREF, alias_dereferencing)
if chase_referrals is not None:
self.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
if self.use_auth_pool: # separate pool when use_auth_pool enabled
pool_url = self.auth_pool_prefix + url
else:
pool_url = url
try:
self.conn_pool = self.connection_pools[pool_url]
except KeyError:
self.conn_pool = ldappool.ConnectionManager(
url,
size=pool_size,
retry_max=pool_retry_max,
retry_delay=pool_retry_delay,
timeout=pool_conn_timeout,
connector_cls=self.Connector,
use_tls=use_tls,
max_lifetime=pool_conn_lifetime)
self.connection_pools[pool_url] = self.conn_pool
def set_option(self, option, invalue):
self.conn_options[option] = invalue
def get_option(self, option):
value = self.conn_options.get(option)
# if option was not specified explicitly, then use connection default
# value for that option if there.
if value is None:
with self._get_pool_connection() as conn:
value = conn.get_option(option)
return value
def _apply_options(self, conn):
# if connection has a lifetime, then it already has options specified
if conn.get_lifetime() > 30:
return
for option, invalue in six.iteritems(self.conn_options):
conn.set_option(option, invalue)
def _get_pool_connection(self):
return self.conn_pool.connection(self.who, self.cred)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
'''Not using use_conn_pool decorator here as this API takes cred as
input.
'''
self.who = who
self.cred = cred
with self._get_pool_connection() as conn:
self._apply_options(conn)
def unbind_s(self):
# Once the connection generator's `with` statement block is done, the
# connection is always released via a finally block in ldappool.
# So this unbind is a no op.
pass
@use_conn_pool
def add_s(self, conn, dn, modlist):
return conn.add_s(dn, modlist)
@use_conn_pool
def search_s(self, conn, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
return conn.search_s(base, scope, filterstr, attrlist,
attrsonly)
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
'''This API is an asynchronous API which returns a MsgId instance to be
used in the result3 call.
To work with the result3 API in a predictable manner, the same LDAP
connection which provided the msgid is needed, so the connection and
msgid are wrapped in a MsgId instance. The connection associated with
search_ext is released once the last hard reference to the MsgId object
is freed. This will happen when the method is done with the returned
MsgId usage.
'''
conn_ctxt = self._get_pool_connection()
conn = conn_ctxt.__enter__()
try:
msgid = conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
except Exception:
conn_ctxt.__exit__(*sys.exc_info())
raise
res = MsgId((conn, msgid))
weakref.ref(res, functools.partial(conn_ctxt.__exit__,
None, None, None))
return res
def result3(self, msgid, all=1, timeout=None,
resp_ctrl_classes=None):
'''This method is used to wait for and return the result of an
operation previously initiated by one of the LDAP asynchronous
operation routines (e.g. search_ext()), which returned an invocation
identifier (a message id) upon successful initiation of the operation.
The input msgid is expected to be an instance of class MsgId which
holds the LDAP session/connection used to execute search_ext and the
message identifier.
The connection associated with search_ext is released once the last
hard reference to the MsgId object is freed. This will happen when the
function which requested the msgid and used it in result3 exits.
'''
conn, msg_id = msgid
return conn.result3(msg_id, all, timeout)
@use_conn_pool
def modify_s(self, conn, dn, modlist):
return conn.modify_s(dn, modlist)
@use_conn_pool
def delete_s(self, conn, dn):
return conn.delete_s(dn)
@use_conn_pool
def delete_ext_s(self, conn, dn, serverctrls=None, clientctrls=None):
return conn.delete_ext_s(dn, serverctrls, clientctrls)
class KeystoneLDAPHandler(LDAPHandler):
'''Convert data types and perform logging.
This LDAP interface wraps the python-ldap based interfaces. The
python-ldap interfaces require string values encoded in UTF-8. The
OpenStack logging framework at the time of this writing is not
capable of accepting strings encoded in UTF-8, the log functions
will throw decoding errors if a non-ascii character appears in a
string.
Prior to the call Python data types are converted to a string
representation as required by the LDAP APIs.
Then logging is performed so we can track what is being
sent/received from LDAP. Also the logging filters security
sensitive items (i.e. passwords).
Then the string values are encoded into UTF-8.
Then the LDAP API entry point is invoked.
Data returned from the LDAP call is converted back from UTF-8
encoded strings into the Python data type used internally in
OpenStack.
'''
def __init__(self, conn=None):
super(KeystoneLDAPHandler, self).__init__(conn=conn)
self.page_size = 0
def __enter__(self):
return self
def _disable_paging(self):
# Disable the pagination from now on
self.page_size = 0
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None,
pool_retry_max=None, pool_retry_delay=None,
pool_conn_timeout=None, pool_conn_lifetime=None):
self.page_size = page_size
return self.conn.connect(url, page_size, alias_dereferencing,
use_tls, tls_cacertfile, tls_cacertdir,
tls_req_cert, chase_referrals,
debug_level=debug_level,
use_pool=use_pool,
pool_size=pool_size,
pool_retry_max=pool_retry_max,
pool_retry_delay=pool_retry_delay,
pool_conn_timeout=pool_conn_timeout,
pool_conn_lifetime=pool_conn_lifetime)
def set_option(self, option, invalue):
return self.conn.set_option(option, invalue)
def get_option(self, option):
return self.conn.get_option(option)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
LOG.debug("LDAP bind: who=%s", who)
who_utf8 = utf8_encode(who)
cred_utf8 = utf8_encode(cred)
return self.conn.simple_bind_s(who_utf8, cred_utf8,
serverctrls=serverctrls,
clientctrls=clientctrls)
def unbind_s(self):
LOG.debug("LDAP unbind")
return self.conn.unbind_s()
def add_s(self, dn, modlist):
ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)])
for kind, values in modlist]
logging_attrs = [(kind, values
if kind != 'userPassword'
else ['****'])
for kind, values in ldap_attrs]
LOG.debug('LDAP add: dn=%s attrs=%s',
dn, logging_attrs)
dn_utf8 = utf8_encode(dn)
ldap_attrs_utf8 = [(kind, [utf8_encode(x) for x in safe_iter(values)])
for kind, values in ldap_attrs]
return self.conn.add_s(dn_utf8, ldap_attrs_utf8)
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
# NOTE(morganfainberg): Remove "None" singletons from this list, which
# allows us to set mapped attributes to "None" as defaults in config.
# Without this filtering, the ldap query would raise a TypeError since
# attrlist is expected to be an iterable of strings.
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search: base=%s scope=%s filterstr=%s '
'attrs=%s attrsonly=%s',
base, scope, filterstr, attrlist, attrsonly)
if self.page_size:
ldap_result = self._paged_search_s(base, scope,
filterstr, attrlist)
else:
base_utf8 = utf8_encode(base)
filterstr_utf8 = utf8_encode(filterstr)
if attrlist is None:
attrlist_utf8 = None
else:
attrlist_utf8 = map(utf8_encode, attrlist)
ldap_result = self.conn.search_s(base_utf8, scope,
filterstr_utf8,
attrlist_utf8, attrsonly)
py_result = convert_ldap_result(ldap_result)
return py_result
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s '
'attrs=%s attrsonly=%s'
'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s',
base, scope, filterstr, attrlist, attrsonly,
serverctrls, clientctrls, timeout, sizelimit)
return self.conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
def _paged_search_s(self, base, scope, filterstr, attrlist=None):
res = []
use_old_paging_api = False
# The API for the simple paged results control changed between
# python-ldap 2.3 and 2.4. We need to detect the capabilities
# of the python-ldap version we are using.
if hasattr(ldap, 'LDAP_CONTROL_PAGE_OID'):
use_old_paging_api = True
lc = ldap.controls.SimplePagedResultsControl(
controlType=ldap.LDAP_CONTROL_PAGE_OID,
criticality=True,
controlValue=(self.page_size, ''))
page_ctrl_oid = ldap.LDAP_CONTROL_PAGE_OID
else:
lc = ldap.controls.libldap.SimplePagedResultsControl(
criticality=True,
size=self.page_size,
cookie='')
page_ctrl_oid = ldap.controls.SimplePagedResultsControl.controlType
base_utf8 = utf8_encode(base)
filterstr_utf8 = utf8_encode(filterstr)
if attrlist is None:
attrlist_utf8 = None
else:
attrlist = [attr for attr in attrlist if attr is not None]
attrlist_utf8 = map(utf8_encode, attrlist)
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
attrlist_utf8,
serverctrls=[lc])
# Loop requesting pages from the ldap server until it has no more data
while True:
# Request to the ldap server a page with 'page_size' entries
rtype, rdata, rmsgid, serverctrls = self.conn.result3(msgid)
# Receive the data
res.extend(rdata)
pctrls = [c for c in serverctrls
if c.controlType == page_ctrl_oid]
if pctrls:
# LDAP server supports pagination
if use_old_paging_api:
est, cookie = pctrls[0].controlValue
lc.controlValue = (self.page_size, cookie)
else:
cookie = lc.cookie = pctrls[0].cookie
if cookie:
# There is more data still on the server
# so we request another page
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
attrlist_utf8,
serverctrls=[lc])
else:
# Exit condition no more data on server
break
else:
LOG.warning(_LW('LDAP Server does not support paging. '
'Disable paging in keystone.conf to '
'avoid this message.'))
self._disable_paging()
break
return res
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
ldap_result = self.conn.result3(msgid, all, timeout, resp_ctrl_classes)
LOG.debug('LDAP result3: msgid=%s all=%s timeout=%s '
'resp_ctrl_classes=%s ldap_result=%s',
msgid, all, timeout, resp_ctrl_classes, ldap_result)
py_result = convert_ldap_result(ldap_result)
return py_result
def modify_s(self, dn, modlist):
ldap_modlist = [
(op, kind, (None if values is None
else [py2ldap(x) for x in safe_iter(values)]))
for op, kind, values in modlist]
logging_modlist = [(op, kind, (values if kind != 'userPassword'
else ['****']))
for op, kind, values in ldap_modlist]
LOG.debug('LDAP modify: dn=%s modlist=%s',
dn, logging_modlist)
dn_utf8 = utf8_encode(dn)
ldap_modlist_utf8 = [
(op, kind, (None if values is None
else [utf8_encode(x) for x in safe_iter(values)]))
for op, kind, values in ldap_modlist]
return self.conn.modify_s(dn_utf8, ldap_modlist_utf8)
def delete_s(self, dn):
LOG.debug("LDAP delete: dn=%s", dn)
dn_utf8 = utf8_encode(dn)
return self.conn.delete_s(dn_utf8)
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
LOG.debug('LDAP delete_ext: dn=%s serverctrls=%s clientctrls=%s',
dn, serverctrls, clientctrls)
dn_utf8 = utf8_encode(dn)
return self.conn.delete_ext_s(dn_utf8, serverctrls, clientctrls)
def __exit__(self, exc_type, exc_val, exc_tb):
self.unbind_s()
_HANDLERS = {}
def register_handler(prefix, handler):
_HANDLERS[prefix] = handler
def _get_connection(conn_url, use_pool=False, use_auth_pool=False):
for prefix, handler in six.iteritems(_HANDLERS):
if conn_url.startswith(prefix):
return handler()
if use_pool:
return PooledLDAPHandler(use_auth_pool=use_auth_pool)
else:
return PythonLDAPHandler()
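# Illustrative note (not from the original module): register_handler()
# lets an alternative backend be substituted by URL prefix. Keystone's
# test suite uses this to swap in an in-memory fake LDAP implementation,
# along the lines of:
#   register_handler('fake://', fakeldap.FakeLdap)
# so that a conn_url such as 'fake://memory' never reaches python-ldap.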
def filter_entity(entity_ref):
"""Filter out private items in an entity dict.
:param entity_ref: the entity dictionary. The 'dn' field will be removed.
'dn' is used in LDAP, but should not be returned to the user. This
value may be modified.
:returns: entity_ref
"""
if entity_ref:
entity_ref.pop('dn', None)
return entity_ref
class BaseLdap(object):
DEFAULT_SUFFIX = "dc=example,dc=com"
DEFAULT_OU = None
DEFAULT_STRUCTURAL_CLASSES = None
DEFAULT_ID_ATTR = 'cn'
DEFAULT_OBJECTCLASS = None
DEFAULT_FILTER = None
DEFAULT_EXTRA_ATTR_MAPPING = []
DUMB_MEMBER_DN = 'cn=dumb,dc=nonexistent'
NotFound = None
notfound_arg = None
options_name = None
model = None
attribute_options_names = {}
immutable_attrs = []
attribute_ignore = []
tree_dn = None
def __init__(self, conf):
self.LDAP_URL = conf.ldap.url
self.LDAP_USER = conf.ldap.user
self.LDAP_PASSWORD = conf.ldap.password
self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope)
self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing)
self.page_size = conf.ldap.page_size
self.use_tls = conf.ldap.use_tls
self.tls_cacertfile = conf.ldap.tls_cacertfile
self.tls_cacertdir = conf.ldap.tls_cacertdir
self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert)
self.attribute_mapping = {}
self.chase_referrals = conf.ldap.chase_referrals
self.debug_level = conf.ldap.debug_level
# LDAP Pool specific attribute
self.use_pool = conf.ldap.use_pool
self.pool_size = conf.ldap.pool_size
self.pool_retry_max = conf.ldap.pool_retry_max
self.pool_retry_delay = conf.ldap.pool_retry_delay
self.pool_conn_timeout = conf.ldap.pool_connection_timeout
self.pool_conn_lifetime = conf.ldap.pool_connection_lifetime
# End user authentication pool specific config attributes
self.use_auth_pool = self.use_pool and conf.ldap.use_auth_pool
self.auth_pool_size = conf.ldap.auth_pool_size
self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime
if self.options_name is not None:
self.suffix = conf.ldap.suffix
if self.suffix is None:
self.suffix = self.DEFAULT_SUFFIX
dn = '%s_tree_dn' % self.options_name
self.tree_dn = (getattr(conf.ldap, dn)
or '%s,%s' % (self.DEFAULT_OU, self.suffix))
idatt = '%s_id_attribute' % self.options_name
self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR
objclass = '%s_objectclass' % self.options_name
self.object_class = (getattr(conf.ldap, objclass)
or self.DEFAULT_OBJECTCLASS)
for k, v in six.iteritems(self.attribute_options_names):
v = '%s_%s_attribute' % (self.options_name, v)
self.attribute_mapping[k] = getattr(conf.ldap, v)
attr_mapping_opt = ('%s_additional_attribute_mapping' %
self.options_name)
attr_mapping = (getattr(conf.ldap, attr_mapping_opt)
or self.DEFAULT_EXTRA_ATTR_MAPPING)
self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping)
ldap_filter = '%s_filter' % self.options_name
self.ldap_filter = getattr(conf.ldap,
ldap_filter) or self.DEFAULT_FILTER
allow_create = '%s_allow_create' % self.options_name
self.allow_create = getattr(conf.ldap, allow_create)
allow_update = '%s_allow_update' % self.options_name
self.allow_update = getattr(conf.ldap, allow_update)
allow_delete = '%s_allow_delete' % self.options_name
self.allow_delete = getattr(conf.ldap, allow_delete)
member_attribute = '%s_member_attribute' % self.options_name
self.member_attribute = getattr(conf.ldap, member_attribute, None)
self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES
if self.notfound_arg is None:
self.notfound_arg = self.options_name + '_id'
attribute_ignore = '%s_attribute_ignore' % self.options_name
self.attribute_ignore = getattr(conf.ldap, attribute_ignore)
self.use_dumb_member = getattr(conf.ldap, 'use_dumb_member')
self.dumb_member = (getattr(conf.ldap, 'dumb_member') or
self.DUMB_MEMBER_DN)
self.subtree_delete_enabled = getattr(conf.ldap,
'allow_subtree_delete')
def _not_found(self, object_id):
if self.NotFound is None:
return exception.NotFound(target=object_id)
else:
return self.NotFound(**{self.notfound_arg: object_id})
def _parse_extra_attrs(self, option_list):
mapping = {}
for item in option_list:
try:
ldap_attr, attr_map = item.split(':')
except Exception:
LOG.warn(_LW(
'Invalid additional attribute mapping: "%s". '
'Format must be <ldap_attribute>:<keystone_attribute>'),
item)
continue
mapping[ldap_attr] = attr_map
return mapping
def _is_dumb_member(self, member_dn):
"""Checks that member is a dumb member.
:param member_dn: DN of member to be checked.
"""
return (self.use_dumb_member
and is_dn_equal(member_dn, self.dumb_member))
def get_connection(self, user=None, password=None, end_user_auth=False):
use_pool = self.use_pool
pool_size = self.pool_size
pool_conn_lifetime = self.pool_conn_lifetime
if end_user_auth:
if not self.use_auth_pool:
use_pool = False
else:
pool_size = self.auth_pool_size
pool_conn_lifetime = self.auth_pool_conn_lifetime
conn = _get_connection(self.LDAP_URL, use_pool,
use_auth_pool=end_user_auth)
conn = KeystoneLDAPHandler(conn=conn)
conn.connect(self.LDAP_URL,
page_size=self.page_size,
alias_dereferencing=self.alias_dereferencing,
use_tls=self.use_tls,
tls_cacertfile=self.tls_cacertfile,
tls_cacertdir=self.tls_cacertdir,
tls_req_cert=self.tls_req_cert,
chase_referrals=self.chase_referrals,
debug_level=self.debug_level,
use_pool=use_pool,
pool_size=pool_size,
pool_retry_max=self.pool_retry_max,
pool_retry_delay=self.pool_retry_delay,
pool_conn_timeout=self.pool_conn_timeout,
pool_conn_lifetime=pool_conn_lifetime
)
if user is None:
user = self.LDAP_USER
if password is None:
password = self.LDAP_PASSWORD
# not all LDAP servers require authentication, so we don't bind
# if we don't have any user/pass
if user and password:
conn.simple_bind_s(user, password)
return conn
def _id_to_dn_string(self, object_id):
return u'%s=%s,%s' % (self.id_attr,
ldap.dn.escape_dn_chars(
six.text_type(object_id)),
self.tree_dn)
def _id_to_dn(self, object_id):
if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL:
return self._id_to_dn_string(object_id)
with self.get_connection() as conn:
search_result = conn.search_s(
self.tree_dn, self.LDAP_SCOPE,
u'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' %
{'id_attr': self.id_attr,
'id': ldap.filter.escape_filter_chars(
six.text_type(object_id)),
'objclass': self.object_class},
attrlist=DN_ONLY)
if search_result:
dn, attrs = search_result[0]
return dn
else:
return self._id_to_dn_string(object_id)
@staticmethod
def _dn_to_id(dn):
return utf8_decode(ldap.dn.str2dn(utf8_encode(dn))[0][0][1])
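# Illustrative note (not from the original module), with example values:
#   _dn_to_id(u'cn=foo,dc=example,dc=com') -> u'foo'
# i.e. the attribute value of the first (leftmost) RDN of the DN.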
def _ldap_res_to_model(self, res):
# LDAP attribute names may be returned in a different case than
# they are defined in the mapping, so we need to check for keys
# in a case-insensitive way. We use the case specified in the
# mapping for the model to ensure we have a predictable way of
# retrieving values later.
lower_res = dict((k.lower(), v) for k, v in six.iteritems(res[1]))
id_attrs = lower_res.get(self.id_attr.lower())
if not id_attrs:
message = _('ID attribute %(id_attr)s not found in LDAP '
'object %(dn)s') % ({'id_attr': self.id_attr,
'dn': res[0]})
raise exception.NotFound(message=message)
if len(id_attrs) > 1:
# FIXME(gyee): if this is a multi-value attribute and it has
# multiple values, we can't use it as ID. Retain the dn_to_id
# logic here so it does not potentially break existing
# deployments. We need to fix our read-write LDAP logic so
# it does not get the ID from DN.
message = _LW('ID attribute %(id_attr)s for LDAP object %(dn)s '
'has multiple values and therefore cannot be used '
'as an ID. Will get the ID from DN instead') % (
{'id_attr': self.id_attr,
'dn': res[0]})
LOG.warn(message)
id_val = self._dn_to_id(res[0])
else:
id_val = id_attrs[0]
obj = self.model(id=id_val)
for k in obj.known_keys:
if k in self.attribute_ignore:
continue
try:
map_attr = self.attribute_mapping.get(k, k)
if map_attr is None:
# Ignore attributes that are mapped to None.
continue
v = lower_res[map_attr.lower()]
except KeyError:
pass
else:
try:
obj[k] = v[0]
except IndexError:
obj[k] = None
return obj
def check_allow_create(self):
if not self.allow_create:
action = _('LDAP %s create') % self.options_name
raise exception.ForbiddenAction(action=action)
def check_allow_update(self):
if not self.allow_update:
action = _('LDAP %s update') % self.options_name
raise exception.ForbiddenAction(action=action)
def check_allow_delete(self):
if not self.allow_delete:
action = _('LDAP %s delete') % self.options_name
raise exception.ForbiddenAction(action=action)
def affirm_unique(self, values):
if values.get('name') is not None:
try:
self.get_by_name(values['name'])
except exception.NotFound:
pass
else:
raise exception.Conflict(type=self.options_name,
details=_('Duplicate name, %s.') %
values['name'])
if values.get('id') is not None:
try:
self.get(values['id'])
except exception.NotFound:
pass
else:
raise exception.Conflict(type=self.options_name,
details=_('Duplicate ID, %s.') %
values['id'])
def create(self, values):
self.affirm_unique(values)
object_classes = self.structural_classes + [self.object_class]
attrs = [('objectClass', object_classes)]
for k, v in six.iteritems(values):
if k in self.attribute_ignore:
continue
if k == 'id':
# no need to check if v is None as 'id' will always have
# a value
attrs.append((self.id_attr, [v]))
elif v is not None:
attr_type = self.attribute_mapping.get(k, k)
if attr_type is not None:
attrs.append((attr_type, [v]))
extra_attrs = [attr for attr, name
in six.iteritems(self.extra_attr_mapping)
if name == k]
for attr in extra_attrs:
attrs.append((attr, [v]))
if 'groupOfNames' in object_classes and self.use_dumb_member:
attrs.append(('member', [self.dumb_member]))
with self.get_connection() as conn:
conn.add_s(self._id_to_dn(values['id']), attrs)
return values
def _ldap_get(self, object_id, ldap_filter=None):
query = (u'(&(%(id_attr)s=%(id)s)'
u'%(filter)s'
u'(objectClass=%(object_class)s))'
% {'id_attr': self.id_attr,
'id': ldap.filter.escape_filter_chars(
six.text_type(object_id)),
'filter': (ldap_filter or self.ldap_filter or ''),
'object_class': self.object_class})
with self.get_connection() as conn:
try:
attrs = list(set(([self.id_attr] +
self.attribute_mapping.values() +
self.extra_attr_mapping.keys())))
res = conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
attrs)
except ldap.NO_SUCH_OBJECT:
return None
try:
return res[0]
except IndexError:
return None
def _ldap_get_all(self, ldap_filter=None):
query = u'(&%s(objectClass=%s))' % (ldap_filter or
self.ldap_filter or
'', self.object_class)
with self.get_connection() as conn:
try:
attrs = list(set(([self.id_attr] +
self.attribute_mapping.values() +
self.extra_attr_mapping.keys())))
return conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
attrs)
except ldap.NO_SUCH_OBJECT:
return []
def _ldap_get_list(self, search_base, scope, query_params=None,
attrlist=None):
query = u'(objectClass=%s)' % self.object_class
if query_params:
def calc_filter(attrname, value):
val_esc = ldap.filter.escape_filter_chars(value)
return '(%s=%s)' % (attrname, val_esc)
query = (u'(&%s%s)' %
(query, ''.join([calc_filter(k, v) for k, v in
six.iteritems(query_params)])))
with self.get_connection() as conn:
return conn.search_s(search_base, scope, query, attrlist)
def get(self, object_id, ldap_filter=None):
res = self._ldap_get(object_id, ldap_filter)
if res is None:
raise self._not_found(object_id)
else:
return self._ldap_res_to_model(res)
def get_by_name(self, name, ldap_filter=None):
query = (u'(%s=%s)' % (self.attribute_mapping['name'],
ldap.filter.escape_filter_chars(
six.text_type(name))))
res = self.get_all(query)
try:
return res[0]
except IndexError:
raise self._not_found(name)
def get_all(self, ldap_filter=None):
return [self._ldap_res_to_model(x)
for x in self._ldap_get_all(ldap_filter)]
def update(self, object_id, values, old_obj=None):
if old_obj is None:
old_obj = self.get(object_id)
modlist = []
for k, v in six.iteritems(values):
if k == 'id' or k in self.attribute_ignore:
continue
# attribute value has not changed
if k in old_obj and old_obj[k] == v:
continue
if k in self.immutable_attrs:
msg = (_("Cannot change %(option_name)s %(attr)s") %
{'option_name': self.options_name, 'attr': k})
raise exception.ValidationError(msg)
if v is None:
if old_obj.get(k) is not None:
modlist.append((ldap.MOD_DELETE,
self.attribute_mapping.get(k, k),
None))
continue
current_value = old_obj.get(k)
if current_value is None:
op = ldap.MOD_ADD
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
elif current_value != v:
op = ldap.MOD_REPLACE
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
if modlist:
with self.get_connection() as conn:
try:
conn.modify_s(self._id_to_dn(object_id), modlist)
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
return self.get(object_id)
def delete(self, object_id):
with self.get_connection() as conn:
try:
conn.delete_s(self._id_to_dn(object_id))
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
def deleteTree(self, object_id):
tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
0,
None)
with self.get_connection() as conn:
try:
conn.delete_ext_s(self._id_to_dn(object_id),
serverctrls=[tree_delete_control])
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
except ldap.NOT_ALLOWED_ON_NONLEAF:
# Most LDAP servers do not support the tree_delete_control.
# In these servers, the usual idiom is to first perform a
                # search to get the entries to delete, then delete them
# in order of child to parent, since LDAP forbids the
# deletion of a parent entry before deleting the children
# of that parent. The simplest way to do that is to delete
# the entries in order of the length of the DN, from longest
# to shortest DN.
dn = self._id_to_dn(object_id)
scope = ldap.SCOPE_SUBTREE
# With some directory servers, an entry with objectclass
# ldapsubentry will not be returned unless it is explicitly
# requested, by specifying the objectclass in the search
# filter. We must specify this, with objectclass=*, in an
# LDAP filter OR clause, in order to return all entries
filt = '(|(objectclass=*)(objectclass=ldapsubentry))'
# We only need the DNs of the entries. Since no attributes
# will be returned, we do not have to specify attrsonly=1.
entries = conn.search_s(dn, scope, filt, attrlist=DN_ONLY)
if entries:
for dn in sorted((e[0] for e in entries),
key=len, reverse=True):
conn.delete_s(dn)
else:
LOG.debug('No entries in LDAP subtree %s', dn)
def add_member(self, member_dn, member_list_dn):
"""Add member to the member list.
:param member_dn: DN of member to be added.
:param member_list_dn: DN of group to which the
member will be added.
:raises: exception.Conflict: If the user was already a member.
self.NotFound: If the group entry didn't exist.
"""
with self.get_connection() as conn:
try:
mod = (ldap.MOD_ADD, self.member_attribute, member_dn)
conn.modify_s(member_list_dn, [mod])
except ldap.TYPE_OR_VALUE_EXISTS:
raise exception.Conflict(_('Member %(member)s '
'is already a member'
' of group %(group)s') % {
'member': member_dn,
'group': member_list_dn})
except ldap.NO_SUCH_OBJECT:
raise self._not_found(member_list_dn)
def remove_member(self, member_dn, member_list_dn):
"""Remove member from the member list.
:param member_dn: DN of member to be removed.
:param member_list_dn: DN of group from which the
member will be removed.
:raises: self.NotFound: If the group entry didn't exist.
ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
"""
with self.get_connection() as conn:
try:
mod = (ldap.MOD_DELETE, self.member_attribute, member_dn)
conn.modify_s(member_list_dn, [mod])
except ldap.NO_SUCH_OBJECT:
raise self._not_found(member_list_dn)
def _delete_tree_nodes(self, search_base, scope, query_params=None):
query = u'(objectClass=%s)' % self.object_class
if query_params:
query = (u'(&%s%s)' %
(query, ''.join(['(%s=%s)'
% (k, ldap.filter.escape_filter_chars(v))
for k, v in
six.iteritems(query_params)])))
not_deleted_nodes = []
with self.get_connection() as conn:
try:
nodes = conn.search_s(search_base, scope, query,
attrlist=DN_ONLY)
except ldap.NO_SUCH_OBJECT:
LOG.debug('Could not find entry with dn=%s', search_base)
raise self._not_found(self._dn_to_id(search_base))
else:
for node_dn, _t in nodes:
try:
conn.delete_s(node_dn)
except ldap.NO_SUCH_OBJECT:
not_deleted_nodes.append(node_dn)
if not_deleted_nodes:
LOG.warn(_("When deleting entries for %(search_base)s, could not"
" delete nonexistent entries %(entries)s%(dots)s"),
{'search_base': search_base,
'entries': not_deleted_nodes[:3],
'dots': '...' if len(not_deleted_nodes) > 3 else ''})
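# A minimal illustrative sketch, separate from the driver API above, of the
# child-before-parent deletion order that deleteTree() falls back to when the
# server lacks the tree delete control: removing entries longest-DN first
# guarantees no parent is deleted while it still has children.
def _dns_children_first(entries):
    """Return the DNs from an LDAP search result, longest DN first."""
    return sorted((entry_dn for entry_dn, _attrs in entries),
                  key=len, reverse=True)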
class EnabledEmuMixIn(BaseLdap):
"""Emulates boolean 'enabled' attribute if turned on.
    Creates a groupOfNames group holding all enabled objects of this class;
    any missing objects are considered disabled.
Options:
* $name_enabled_emulation - boolean, on/off
* $name_enabled_emulation_dn - DN of that groupOfNames, default is
cn=enabled_${name}s,${tree_dn}
Where ${name}s is the plural of self.options_name ('users' or 'tenants'),
${tree_dn} is self.tree_dn.
"""
def __init__(self, conf):
super(EnabledEmuMixIn, self).__init__(conf)
enabled_emulation = '%s_enabled_emulation' % self.options_name
self.enabled_emulation = getattr(conf.ldap, enabled_emulation)
enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name
self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn)
if not self.enabled_emulation_dn:
naming_attr_name = 'cn'
naming_attr_value = 'enabled_%ss' % self.options_name
sub_vals = (naming_attr_name, naming_attr_value, self.tree_dn)
self.enabled_emulation_dn = '%s=%s,%s' % sub_vals
naming_attr = (naming_attr_name, [naming_attr_value])
else:
# Extract the attribute name and value from the configured DN.
naming_dn = ldap.dn.str2dn(utf8_encode(self.enabled_emulation_dn))
naming_rdn = naming_dn[0][0]
naming_attr = (utf8_decode(naming_rdn[0]),
utf8_decode(naming_rdn[1]))
self.enabled_emulation_naming_attr = naming_attr
def _get_enabled(self, object_id):
dn = self._id_to_dn(object_id)
query = '(member=%s)' % dn
with self.get_connection() as conn:
try:
enabled_value = conn.search_s(self.enabled_emulation_dn,
ldap.SCOPE_BASE,
query, ['cn'])
except ldap.NO_SUCH_OBJECT:
return False
else:
return bool(enabled_value)
def _add_enabled(self, object_id):
if not self._get_enabled(object_id):
modlist = [(ldap.MOD_ADD,
'member',
[self._id_to_dn(object_id)])]
with self.get_connection() as conn:
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except ldap.NO_SUCH_OBJECT:
attr_list = [('objectClass', ['groupOfNames']),
('member', [self._id_to_dn(object_id)]),
self.enabled_emulation_naming_attr]
if self.use_dumb_member:
attr_list[1][1].append(self.dumb_member)
conn.add_s(self.enabled_emulation_dn, attr_list)
def _remove_enabled(self, object_id):
modlist = [(ldap.MOD_DELETE,
'member',
[self._id_to_dn(object_id)])]
with self.get_connection() as conn:
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
pass
def create(self, values):
if self.enabled_emulation:
enabled_value = values.pop('enabled', True)
ref = super(EnabledEmuMixIn, self).create(values)
if 'enabled' not in self.attribute_ignore:
if enabled_value:
self._add_enabled(ref['id'])
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).create(values)
def get(self, object_id, ldap_filter=None):
ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
ref['enabled'] = self._get_enabled(object_id)
return ref
def get_all(self, ldap_filter=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
# had to copy BaseLdap.get_all here to ldap_filter by DN
tenant_list = [self._ldap_res_to_model(x)
for x in self._ldap_get_all(ldap_filter)
if x[0] != self.enabled_emulation_dn]
for tenant_ref in tenant_list:
tenant_ref['enabled'] = self._get_enabled(tenant_ref['id'])
return tenant_list
else:
return super(EnabledEmuMixIn, self).get_all(ldap_filter)
def update(self, object_id, values, old_obj=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
data = values.copy()
enabled_value = data.pop('enabled', None)
ref = super(EnabledEmuMixIn, self).update(object_id, data, old_obj)
if enabled_value is not None:
if enabled_value:
self._add_enabled(object_id)
else:
self._remove_enabled(object_id)
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).update(
object_id, values, old_obj)
def delete(self, object_id):
if self.enabled_emulation:
self._remove_enabled(object_id)
super(EnabledEmuMixIn, self).delete(object_id)
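# Illustrative sketch only: shows how the default enabled-emulation DN
# described in the EnabledEmuMixIn docstring is composed when no explicit
# $name_enabled_emulation_dn is configured. The argument values below are
# hypothetical examples, not configuration defaults.
def _example_enabled_emulation_dn(options_name='user',
                                  tree_dn='ou=Users,dc=example,dc=com'):
    return 'cn=enabled_%ss,%s' % (options_name, tree_dn)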
|
apache-2.0
| -522,299,333,275,910
| 38.969188
| 79
| 0.571266
| false
| 4.124942
| false
| false
| false
|
rphillips/bitbake
|
lib/bb/__init__.py
|
1
|
4327
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Build System Python Library
#
# Copyright (C) 2003 Holger Schurig
# Copyright (C) 2003, 2004 Chris Larson
#
# Based on Gentoo's portage.py.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
__version__ = "1.11.0"
import sys
if sys.version_info < (2, 6, 0):
raise RuntimeError("Sorry, python 2.6.0 or later is required for this version of bitbake")
import os
import logging
import traceback
class NullHandler(logging.Handler):
def emit(self, record):
pass
Logger = logging.getLoggerClass()
class BBLogger(Logger):
def __init__(self, name):
if name.split(".")[0] == "BitBake":
self.debug = self.bbdebug
Logger.__init__(self, name)
def bbdebug(self, level, msg, *args, **kwargs):
return self.log(logging.DEBUG - level - 1, msg, *args, **kwargs)
def plain(self, msg, *args, **kwargs):
return self.log(logging.INFO + 1, msg, *args, **kwargs)
def verbose(self, msg, *args, **kwargs):
return self.log(logging.INFO - 1, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
return self.critical("%s\n%s" % (msg, traceback.format_exc()), *args, **kwargs)
logging.raiseExceptions = False
logging.setLoggerClass(BBLogger)
logger = logging.getLogger("BitBake")
logger.addHandler(NullHandler())
logger.setLevel(logging.INFO)
# This has to be imported after the setLoggerClass, as the import of bb.msg
# can result in construction of the various loggers.
import bb.msg
if "BBDEBUG" in os.environ:
level = int(os.environ["BBDEBUG"])
if level:
bb.msg.set_debug_level(level)
# Messaging convenience functions
def plain(*args):
logger.plain(''.join(args))
def debug(lvl, *args):
logger.debug(lvl, ''.join(args))
def note(*args):
logger.info(''.join(args))
def warn(*args):
logger.warn(''.join(args))
def error(*args):
logger.error(''.join(args))
def fatal(*args):
logger.critical(''.join(args))
sys.exit(1)
def deprecated(func, name = None, advice = ""):
"""This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
when the function is used."""
import warnings
if advice:
advice = ": %s" % advice
if name is None:
name = func.__name__
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function %s%s." % (name,
advice),
category = PendingDeprecationWarning,
stacklevel = 2)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
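# Minimal illustrative sketch of using the decorator above; 'legacy_note' is
# a hypothetical name and not part of the bitbake API.
def _deprecated_usage_example():
    @deprecated
    def legacy_note(msg):
        return msg
    # Calling the wrapped function emits a PendingDeprecationWarning via
    # warnings.warn() and then delegates to the original implementation.
    return legacy_note("example message")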
# For compatibility
def deprecate_import(current, modulename, fromlist, renames = None):
"""Import objects from one module into another, wrapping them with a DeprecationWarning"""
import sys
module = __import__(modulename, fromlist = fromlist)
for position, objname in enumerate(fromlist):
obj = getattr(module, objname)
newobj = deprecated(obj, "{0}.{1}".format(current, objname),
"Please use {0}.{1} instead".format(modulename, objname))
if renames:
newname = renames[position]
else:
newname = objname
setattr(sys.modules[current], newname, newobj)
deprecate_import(__name__, "bb.fetch", ("MalformedUrl", "encodeurl", "decodeurl"))
deprecate_import(__name__, "bb.utils", ("mkdirhier", "movefile", "copyfile", "which"))
deprecate_import(__name__, "bb.utils", ["vercmp_string"], ["vercmp"])
|
gpl-2.0
| 6,545,991,354,082,631,000
| 30.816176
| 94
| 0.645251
| false
| 3.68569
| false
| false
| false
|
akaariai/django-reverse-unique
|
reverse_unique_tests/models.py
|
1
|
3974
|
from __future__ import unicode_literals
from datetime import date
from django.db import models
from django.db.models import Q, F
from django.utils.translation import get_language
from reverse_unique import ReverseUnique
def filter_lang():
return Q(lang=get_language())
class Article(models.Model):
pub_date = models.DateField()
active_translation = ReverseUnique(
"ArticleTranslation", filters=filter_lang)
class Meta:
app_label = 'reverse_unique'
class Lang(models.Model):
code = models.CharField(max_length=2, primary_key=True)
class Meta:
app_label = 'reverse_unique'
class ArticleTranslation(models.Model):
article = models.ForeignKey(Article, on_delete=models.CASCADE)
lang = models.ForeignKey(Lang, on_delete=models.CASCADE)
title = models.CharField(max_length=100)
abstract = models.CharField(max_length=100, null=True)
body = models.TextField()
class Meta:
unique_together = ('article', 'lang')
app_label = 'reverse_unique'
# The idea for DefaultTranslationArticle is that articles have a default
# language. This allows testing of filter conditions targeting both
# tables in the join.
class DefaultTranslationArticle(models.Model):
pub_date = models.DateField()
default_lang = models.CharField(max_length=2)
active_translation = ReverseUnique(
"DefaultTranslationArticleTranslation", filters=filter_lang)
default_translation = ReverseUnique(
"DefaultTranslationArticleTranslation", filters=Q(lang=F('article__default_lang')))
class Meta:
app_label = 'reverse_unique'
class DefaultTranslationArticleTranslation(models.Model):
article = models.ForeignKey(DefaultTranslationArticle, on_delete=models.CASCADE)
lang = models.CharField(max_length=2)
title = models.CharField(max_length=100)
abstract = models.CharField(max_length=100, null=True)
body = models.TextField()
class Meta:
unique_together = ('article', 'lang')
app_label = 'reverse_unique'
class Guest(models.Model):
name = models.CharField(max_length=100)
class Meta:
app_label = 'reverse_unique'
def filter_reservations():
return Q(from_date__lte=date.today()) & (
Q(until_date__gte=date.today()) | Q(until_date__isnull=True))
class Room(models.Model):
current_reservation = ReverseUnique(
"Reservation", through='reservations',
filters=filter_reservations)
class Meta:
app_label = 'reverse_unique'
class Reservation(models.Model):
room = models.ForeignKey(Room, on_delete=models.CASCADE, related_name='reservations')
guest = models.ForeignKey(Guest, on_delete=models.CASCADE)
from_date = models.DateField()
until_date = models.DateField(null=True) # NULL means reservation "forever".
class Meta:
app_label = 'reverse_unique'
class Parent(models.Model):
rel1 = ReverseUnique("Rel1", filters=Q(f1="foo"))
uniq_field = models.CharField(max_length=10, unique=True, null=True)
class Meta:
app_label = 'reverse_unique'
class Rel1(models.Model):
parent = models.ForeignKey(Parent, on_delete=models.CASCADE, related_name="rel1list")
f1 = models.CharField(max_length=10)
class Meta:
app_label = 'reverse_unique'
class Child(Parent):
rel2 = ReverseUnique("Rel2", filters=Q(f1="foo"))
class Meta:
app_label = 'reverse_unique'
class AnotherChild(Child):
rel1_child = ReverseUnique("Rel1", filters=Q(f1__startswith="foo"))
class Meta:
app_label = 'reverse_unique'
class Rel2(models.Model):
child = models.ForeignKey(Child, on_delete=models.CASCADE, related_name="rel2list")
f1 = models.CharField(max_length=10)
class Meta:
app_label = 'reverse_unique'
class Rel3(models.Model):
a_model = models.ForeignKey(Parent, on_delete=models.CASCADE, to_field='uniq_field')
class Meta:
app_label = 'reverse_unique'
|
bsd-3-clause
| -8,017,831,775,881,084,000
| 26.79021
| 91
| 0.693256
| false
| 3.724461
| false
| false
| false
|
kobotoolbox/kpi
|
kpi/management/commands/remove_duplicate_assetversions.py
|
1
|
9498
|
# coding: utf-8
import json
from collections import defaultdict
from hashlib import md5
from django.core.management.base import BaseCommand
from django.db import transaction
from ...models import Asset, AssetVersion
ROUGH_BATCH_MEM_LIMIT_MB = 100
MAX_BATCH_SIZE = 100
def find_original_and_duplicate_versions(version_pks, asset_pk):
"""
Given a list of `AssetVersion` primary keys, returns a tuple of:
* a list of the original `AssetVersion` primary keys;
    * a list of the duplicate primary keys;
    * a dict mapping each original primary key to the `uid`s of its duplicates;
    * the batch size used to fetch the versions without memory exhaustion.
Duplicates are identified by the following method:
* Remove all `$kuid`s from `version_content['survey']` and
`version_content['choices']`;
* Serialize the modified `version_content`, `deployed_content`, `name`,
`_deployment_data`, and `deployed` to JSON;
* Calculate the MD5 digest of that JSON;
* Consider the first `AssetVersion` (ordered by `pk`) with a given MD5
to be the original, and any subsequent `AssetVersion`s with the
same MD5 to be duplicates.
:param version_pks: an iterable of `AssetVersion` primary keys to search
for duplicates. They MUST all belong to the same `Asset`.
:param asset_pk: the primary key of the `Asset` to which all versions
belong. This is required as a safety check.
"""
version_pks = sorted(version_pks)
digests_to_first_version_pks = defaultdict(list)
start = 0
batch_size = 1
batch_size_guessed = False
while True:
this_batch_version_pks = version_pks[start:start + batch_size]
if not this_batch_version_pks:
break
versions = AssetVersion.objects.filter(
asset_id=asset_pk,
pk__in=this_batch_version_pks
).order_by('pk')
for version in versions.iterator():
for kuid_containing in 'survey', 'choices':
try:
for item in version.version_content[kuid_containing]:
try:
del item['$kuid']
except KeyError:
pass
except KeyError:
continue
serialized = json.dumps((
version.deployed_content,
version.name,
version._deployment_data, # noqa
version.version_content,
version.deployed
), sort_keys=True)
digest = md5(serialized).digest()
digests_to_first_version_pks[digest].append({
'pk': version.pk,
'uid': version.uid,
})
start += batch_size
if not batch_size_guessed:
batch_size = max(
1, int(ROUGH_BATCH_MEM_LIMIT_MB * 1024 * 1024 / len(serialized)))
batch_size = min(batch_size, MAX_BATCH_SIZE)
batch_size_guessed = True
duplicates_of = {}
duplicate_version_pks = []
for (digest, matches) in digests_to_first_version_pks.items():
if len(matches) > 1:
duplicates_of[matches[0]['pk']] = [m['uid'] for m in matches[1:]]
duplicate_version_pks = duplicate_version_pks + [
m['pk'] for m in matches[1:]
]
return (
duplicates_of.keys(),
duplicate_version_pks,
duplicates_of,
batch_size,
)
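# Illustrative sketch, not used by the command below, of the duplicate
# detection described in the docstring above: strip '$kuid' markers from the
# survey/choices content, serialize deterministically, and hash the result.
# The 'version_content' argument is a plain dict in the same shape as
# AssetVersion.version_content.
def _example_content_digest(version_content):
    for kuid_containing in ('survey', 'choices'):
        for item in version_content.get(kuid_containing, []):
            item.pop('$kuid', None)
    serialized = json.dumps(version_content, sort_keys=True)
    return md5(serialized).digest()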
class Command(BaseCommand):
help = (
'Remove duplicate `AssetVersion`s as identified by their content '
'(after stripping `$kuid`s). Output is tab-delimited with the '
'following columns:\n'
'\tUsername\n\tAsset UID\n\tOriginal Version Count\n'
'\tDuplicate Version Count\n'
'\tAsterisk If Deployed Version Is Duplicate\n'
'The currently deployed version will never be deleted.'
)
def add_arguments(self, parser):
parser.add_argument(
'--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Show information about duplicates but do not remove them'
)
parser.add_argument(
'--username',
action='store',
dest='username',
default=False,
help='Consider only versions owned by a specific user'
)
parser.add_argument(
'--asset-uid',
action='store',
dest='asset_uid',
default=False,
help='Consider only versions of the specified `Asset`'
)
def handle(self, *args, **options):
versions = AssetVersion.objects.order_by('pk')
username = options.get('username')
if username:
versions = versions.filter(asset__owner__username=username)
asset_uid = options.get('asset_uid')
if asset_uid:
versions = versions.filter(asset__uid=asset_uid)
# Trying to get the ORM to annotate each `Asset` with a count of its
# `AssetVersion`s is unusably slow
self.stderr.write('Listing versions (may take several seconds)...')
version_dump = versions.values_list('pk', 'asset_id')
versions_for_assets = defaultdict(list)
for version_pk, asset_pk in version_dump:
versions_for_assets[asset_pk].append(version_pk)
version_counts_for_assets = {
asset_pk: len(version_pks) for
asset_pk, version_pks in versions_for_assets.items()
}
# Sort descending by version count; the higher the version count, the
# more likely many of the versions are duplicates
assets_sorted_by_version_count = sorted(
version_counts_for_assets, key=version_counts_for_assets.get,
reverse=True
)
self.stderr.write(
'Found {} versions for {} assets; '
'maximum {} versions per asset'.format(
len(version_dump),
len(versions_for_assets),
version_counts_for_assets[assets_sorted_by_version_count[0]]
)
)
for asset_pk in assets_sorted_by_version_count:
with transaction.atomic():
asset_values = Asset.objects.filter(
pk=asset_pk
).values_list('owner__username', 'uid', '_deployment_data')
if not asset_values:
# Asset with this PK disappeared before we got to it
continue
username, uid, deployment_data = asset_values[0]
# Find the currently deployed version; we'll never delete it
# even if it's a duplicate
currently_deployed_uid = json.loads(deployment_data).get(
'version', None)
currently_deployed_pk = AssetVersion.objects.filter(
uid=currently_deployed_uid).values_list('pk', flat=True)
original_version_pks, duplicate_version_pks, duplicate_uids, \
batch_size = find_original_and_duplicate_versions(
versions_for_assets[asset_pk], asset_pk)
pks_to_delete = duplicate_version_pks
currently_deployed_is_duplicate = False
if currently_deployed_pk:
try:
# Don't delete the currently deployed version
pks_to_delete.remove(currently_deployed_pk[0])
except ValueError:
pass
else:
currently_deployed_is_duplicate = True
output = (
username,
uid,
len(original_version_pks),
len(duplicate_version_pks),
'*' if currently_deployed_is_duplicate else ''
)
self.stdout.write(('{}\t' * len(output)).format(*output))
if not options.get('dry_run'):
# Store the UIDs of all duplicate versions in the original
# version's `uid_aliases` field
for pk, new_uid_aliases in duplicate_uids.items():
version_qs = AssetVersion.objects.filter(pk=pk)
uid_aliases = version_qs.values_list(
'uid_aliases', flat=True)[0]
if not uid_aliases:
uid_aliases = new_uid_aliases
else:
uid_aliases.extend(new_uid_aliases)
version_qs.update(uid_aliases=uid_aliases)
# Haha, silly programmer: you thought you could delete all
# these versions at once without memory exhaustion?!
# There are FKs (e.g. from `AssetSnapshot`) that require
# Django to take the slow path for cascade deletion
start = 0
while True:
this_batch_version_pks = pks_to_delete[
start:start + batch_size]
if not this_batch_version_pks:
break
AssetVersion.objects.filter(
pk__in=this_batch_version_pks
).delete()
start += batch_size
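# Generic illustrative sketch of the batched-delete idiom used in handle()
# above; 'model', 'pks' and the batch size are hypothetical example inputs.
def _delete_in_batches(model, pks, batch_size=100):
    """Delete rows in fixed-size batches to bound memory use."""
    for start in range(0, len(pks), batch_size):
        model.objects.filter(pk__in=pks[start:start + batch_size]).delete()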
|
agpl-3.0
| 5,937,014,375,773,076,000
| 39.589744
| 81
| 0.540114
| false
| 4.535817
| false
| false
| false
|
mihaelacr/pydeeplearn
|
code/lib/trainingoptions.py
|
1
|
1144
|
""" Defines a training options class as a holder for options that can be passed
for training a neural network.
"""
__author__ = "Mihaela Rosca"
__contact__ = "mihaela.c.rosca@gmail.com"
import numpy as np
# TODO: move from common here
import common
class TrainingOptions(object):
def __init__(self, miniBatchSize,
learningRate,
momentumMax=0.0,
rmsprop=False,
weightDecayL1=0.0,
weightDecayL2=0.0,
nesterovMomentum=False,
save_best_weights=False,
momentumForEpochFunction=common.getMomentumForEpochLinearIncrease,
momentumFactorForLearningRate=False):
self.miniBatchSize = miniBatchSize
self.learningRate = learningRate
self.momentumMax = np.float32(momentumMax)
self.rmsprop = rmsprop
self.weightDecayL1 = weightDecayL1
self.weightDecayL2 = weightDecayL2
self.nesterov = nesterovMomentum
self.momentumFactorForLearningRate = momentumFactorForLearningRate
self.momentumForEpochFunction = momentumForEpochFunction
self.batchLearningRate = np.float32(learningRate / miniBatchSize)
self.save_best_weights = save_best_weights
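# Minimal illustrative instantiation of the class above; the hyperparameter
# values are arbitrary examples, not recommended settings.
def _example_training_options():
    return TrainingOptions(miniBatchSize=10,
                           learningRate=0.01,
                           momentumMax=0.9,
                           nesterovMomentum=True)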
|
bsd-3-clause
| -2,303,735,449,736,076,000
| 32.647059
| 79
| 0.733392
| false
| 3.530864
| false
| false
| false
|
Goldmund-Wyldebeast-Wunderliebe/raven-python
|
raven/contrib/django/client.py
|
1
|
6779
|
"""
raven.contrib.django.client
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.http import HttpRequest
from django.template import TemplateSyntaxError
from django.template.loader import LoaderOrigin
from raven.base import Client
from raven.contrib.django.utils import get_data_from_template, get_host
from raven.contrib.django.middleware import SentryLogMiddleware
from raven.utils.wsgi import get_headers, get_environ
__all__ = ('DjangoClient',)
class DjangoClient(Client):
logger = logging.getLogger('sentry.errors.client.django')
def is_enabled(self):
return bool(self.servers or 'sentry' in settings.INSTALLED_APPS)
def get_user_info(self, user):
if not user.is_authenticated():
return {'is_authenticated': False}
user_info = {
'id': user.pk,
'is_authenticated': True,
}
if hasattr(user, 'email'):
user_info['email'] = user.email
if hasattr(user, 'get_username'):
user_info['username'] = user.get_username()
elif hasattr(user, 'username'):
user_info['username'] = user.username
return user_info
def get_data_from_request(self, request):
try:
from django.contrib.auth.models import AbstractBaseUser as BaseUser
except ImportError:
from django.contrib.auth.models import User as BaseUser # NOQA
result = {}
if hasattr(request, 'user') and isinstance(request.user, BaseUser):
result['sentry.interfaces.User'] = self.get_user_info(request.user)
try:
uri = request.build_absolute_uri()
except SuspiciousOperation:
# attempt to build a URL for reporting as Django won't allow us to
# use get_host()
if request.is_secure():
scheme = 'https'
else:
scheme = 'http'
host = get_host(request)
uri = '%s://%s%s' % (scheme, host, request.path)
if request.method != 'GET':
try:
data = request.body
except:
try:
data = request.raw_post_data
except Exception:
# assume we had a partial read.
try:
data = request.POST or '<unavailable>'
except Exception:
data = '<unavailable>'
else:
data = None
environ = request.META
result.update({
'sentry.interfaces.Http': {
'method': request.method,
'url': uri,
'query_string': request.META.get('QUERY_STRING'),
'data': data,
'cookies': dict(request.COOKIES),
'headers': dict(get_headers(environ)),
'env': dict(get_environ(environ)),
}
})
return result
def build_msg(self, *args, **kwargs):
data = super(DjangoClient, self).build_msg(*args, **kwargs)
stacks = (
data.get('sentry.interfaces.Stacktrace'),
data.get('sentry.interfaces.Exception', {}).get('stacktrace'),
)
for stacktrace in filter(bool, stacks):
for frame in stacktrace['frames']:
module = frame.get('module')
if not module:
continue
if module.startswith('django.'):
frame['in_app'] = False
if not self.site and 'django.contrib.sites' in settings.INSTALLED_APPS:
try:
from django.contrib.sites.models import Site
site = Site.objects.get_current()
site_name = site.name or site.domain
data['tags'].setdefault('site', site_name)
except Exception:
# Database error? Fallback to the id
data['tags'].setdefault('site', settings.SITE_ID)
return data
def capture(self, event_type, request=None, **kwargs):
if 'data' not in kwargs:
kwargs['data'] = data = {}
else:
data = kwargs['data']
if request is None:
request = getattr(SentryLogMiddleware.thread, 'request', None)
is_http_request = isinstance(request, HttpRequest)
if is_http_request:
data.update(self.get_data_from_request(request))
if kwargs.get('exc_info'):
exc_value = kwargs['exc_info'][1]
# As of r16833 (Django) all exceptions may contain a ``django_template_source`` attribute (rather than the
# legacy ``TemplateSyntaxError.source`` check) which describes template information.
if hasattr(exc_value, 'django_template_source') or ((isinstance(exc_value, TemplateSyntaxError) and
isinstance(getattr(exc_value, 'source', None), (tuple, list)) and isinstance(exc_value.source[0], LoaderOrigin))):
source = getattr(exc_value, 'django_template_source', getattr(exc_value, 'source', None))
if source is None:
self.logger.info('Unable to get template source from exception')
data.update(get_data_from_template(source))
result = super(DjangoClient, self).capture(event_type, **kwargs)
if is_http_request and result:
# attach the sentry object to the request
request.sentry = {
'project_id': data.get('project', self.project),
'id': self.get_ident(result),
}
return result
def send(self, **kwargs):
"""
Serializes and signs ``data`` and passes the payload off to ``send_remote``
If ``servers`` was passed into the constructor, this will serialize the data and pipe it to
each server using ``send_remote()``. Otherwise, this will communicate with ``sentry.models.GroupedMessage``
directly.
"""
if self.servers:
return super(DjangoClient, self).send(**kwargs)
elif 'sentry' in settings.INSTALLED_APPS:
try:
return self.send_integrated(kwargs)
except Exception as e:
self.error_logger.error(
'Unable to record event: %s\nEvent was: %r', e,
kwargs['message'], exc_info=True)
def send_integrated(self, kwargs):
from sentry.models import Group
return Group.objects.from_kwargs(**kwargs)
|
bsd-3-clause
| 4,290,953,358,476,939,000
| 34.678947
| 129
| 0.569258
| false
| 4.474587
| false
| false
| false
|
dallingham/regenerate
|
regenerate/writers/c_defines.py
|
1
|
3670
|
#
# Manage registers in a hardware design
#
# Copyright (C) 2008 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
CDefines - Writes out C defines representing the register addresses
"""
from writer_base import WriterBase, ExportInfo
from regenerate.extras import full_token, in_groups
import os
HEADER = [
"/*------------------------------------------------------------------\n",
" * File : $f$\n",
" * Author : $U$\n",
" * Created : $D$\n",
" * Block : $M$\n",
" *\n",
" * -----------------------------------------------------------------\n",
" * Copyright $Y$. All rights reserved.\n",
" *------------------------------------------------------------------\n",
" */\n",
"#ifndef __$F$\n",
"#define __$F$ 1\n",
"\n",
]
TRAILER = ["#endif\n"]
REG_TYPE = {
8: "unsigned char*",
16: "unsigned short*",
32: "unsigned long*",
64: "unsigned long long*",
}
class CDefines(WriterBase):
"""
Writes out C defines representing the register addresses
"""
def __init__(self, project, dbase):
WriterBase.__init__(self, project, dbase)
self._ofile = None
def write_def(self, reg, data, base):
"""
Writes the definition in the format of:
#define register (address)
"""
address = reg.address + base + data.base
if data.repeat > 1:
for i in range(0, data.repeat):
name = full_token(data.group, reg.token,
self._dbase.module_name, i, data.format)
address += (i * data.roffset)
self._ofile.write("#define %-30s (*((volatile %s)0x%x))\n" %
(name, REG_TYPE[reg.width], address))
else:
name = full_token(data.group, reg.token, self._dbase.module_name,
-1, data.format)
self._ofile.write("#define %-30s (*((volatile %s)0x%x))\n" %
(name, REG_TYPE[reg.width], address))
def write(self, filename):
"""
Writes the output file
"""
self._filename = os.path.basename(filename)
with open(filename, "w") as self._ofile:
self.write_header(self._ofile, "".join(HEADER))
addr_maps = self._project.get_address_maps()
if len(addr_maps) > 0:
base = self._project.get_address_base(addr_maps[0].name)
for data in in_groups(self._dbase.module_name, self._project):
for register in self._dbase.get_all_registers():
self.write_def(register, data, base)
self._ofile.write('\n')
for line in TRAILER:
self._ofile.write('%s\n' % line.replace('$M$', self._module))
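# Illustrative sketch only: builds the text that write_def() above emits for a
# single register; the name, width and address are hypothetical examples.
def _example_define_line(name='BLOCK_CTRL_REG', width=32, address=0x1000):
    return "#define %-30s (*((volatile %s)0x%x))\n" % (
        name, REG_TYPE[width], address)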
EXPORTERS = [
(WriterBase.TYPE_BLOCK, ExportInfo(CDefines, ("Header files", "C Source"),
"C header files", ".h", 'headers-c'))
]
|
gpl-2.0
| -6,252,778,430,328,302,000
| 32.981481
| 79
| 0.53188
| false
| 3.937768
| false
| false
| false
|
sh-chris110/chris
|
python/cube.py
|
1
|
5099
|
import sys
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from PIL import Image
class MyPyOpenGLTest:
def __init__(self,
width=640,
height=480,
title='MyPyOpenGLTest'.encode()):
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
glutInitWindowSize(width, height)
self.window = glutCreateWindow(title)
glutDisplayFunc(self.Draw)
glutIdleFunc(self.Draw)
self.InitGL(width, height)
self.x = 0.0
self.y = 0.0
self.z = 0.0
def Draw(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
glTranslate(0.0, 0.0, -5.0)
glRotatef(self.x, 1.0, 0.0, 0.0)
glRotatef(self.y, 0.0, 1.0, 0.0)
glRotatef(self.z, 0.0, 0.0, 1.0)
glBindTexture(GL_TEXTURE_2D, 0)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0)
glVertex3f(-1.0, -1.0, 1.0)
glTexCoord2f(1.0, 0.0)
glVertex3f(1.0, -1.0, 1.0)
glTexCoord2f(1.0, 1.0)
glVertex3f(1.0, 1.0, 1.0)
glTexCoord2f(0.0, 1.0)
glVertex3f(-1.0, 1.0, 1.0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 1)
glBegin(GL_QUADS)
glTexCoord2f(1.0, 0.0)
glVertex3f(-1.0, -1.0, -1.0)
glTexCoord2f(1.0, 1.0)
glVertex3f(-1.0, 1.0, -1.0)
glTexCoord2f(0.0, 1.0)
glVertex3f(1.0, 1.0, -1.0)
glTexCoord2f(0.0, 0.0)
glVertex3f(1.0, -1.0, -1.0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 2)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 1.0)
glVertex3f(-1.0, 1.0, -1.0)
glTexCoord2f(0.0, 0.0)
glVertex3f(-1.0, 1.0, 1.0)
glTexCoord2f(1.0, 0.0)
glVertex3f(1.0, 1.0, 1.0)
glTexCoord2f(1.0, 1.0)
glVertex3f(1.0, 1.0, -1.0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 3)
glBegin(GL_QUADS)
glTexCoord2f(1.0, 1.0)
glVertex3f(-1.0, -1.0, -1.0)
glTexCoord2f(0.0, 1.0)
glVertex3f(1.0, -1.0, -1.0)
glTexCoord2f(0.0, 0.0)
glVertex3f(1.0, -1.0, 1.0)
glTexCoord2f(1.0, 0.0)
glVertex3f(-1.0, -1.0, 1.0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 4)
glBegin(GL_QUADS)
glTexCoord2f(1.0, 0.0)
glVertex3f(1.0, -1.0, -1.0)
glTexCoord2f(1.0, 1.0)
glVertex3f(1.0, 1.0, -1.0)
glTexCoord2f(0.0, 1.0)
glVertex3f(1.0, 1.0, 1.0)
glTexCoord2f(0.0, 0.0)
glVertex3f(1.0, -1.0, 1.0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 5)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0)
glVertex3f(-1.0, -1.0, -1.0)
glTexCoord2f(1.0, 0.0)
glVertex3f(-1.0, -1.0, 1.0)
glTexCoord2f(1.0, 1.0)
glVertex3f(-1.0, 1.0, 1.0)
glTexCoord2f(0.0, 1.0)
glVertex3f(-1.0, 1.0, -1.0)
glEnd()
glutSwapBuffers()
self.x += 0.4
self.y += 0.6
self.z += 0.2
def LoadTexture(self):
imgFiles = [str(i)+'.jpeg' for i in range(1,7)]
for i in range(6):
img = Image.open(imgFiles[i])
width, height = img.size
img = img.tobytes('raw', 'RGBX', 0, -1)
glGenTextures(2)
glBindTexture(GL_TEXTURE_2D, i)
glTexImage2D(GL_TEXTURE_2D, 0, 4,
width, height, 0, GL_RGBA,
GL_UNSIGNED_BYTE,img)
glTexParameterf(GL_TEXTURE_2D,
GL_TEXTURE_WRAP_S, GL_CLAMP)
glTexParameterf(GL_TEXTURE_2D,
GL_TEXTURE_WRAP_T, GL_CLAMP)
glTexParameterf(GL_TEXTURE_2D,
GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D,
GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D,
GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D,
GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexEnvf(GL_TEXTURE_ENV,
GL_TEXTURE_ENV_MODE, GL_DECAL)
def InitGL(self, width, height):
self.LoadTexture()
glEnable(GL_TEXTURE_2D)
glClearColor(1.0, 1.0, 1.0, 0.0)
glClearDepth(1.0)
glDepthFunc(GL_LESS)
glShadeModel(GL_SMOOTH)
glEnable(GL_CULL_FACE)
glCullFace(GL_BACK)
glEnable(GL_POINT_SMOOTH)
glEnable(GL_LINE_SMOOTH)
glEnable(GL_POLYGON_SMOOTH)
glMatrixMode(GL_PROJECTION)
glHint(GL_POINT_SMOOTH_HINT,GL_NICEST)
glHint(GL_LINE_SMOOTH_HINT,GL_NICEST)
glHint(GL_POLYGON_SMOOTH_HINT,GL_FASTEST)
glLoadIdentity()
gluPerspective(45.0, float(width)/float(height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
def MainLoop(self):
glutMainLoop()
if __name__ == '__main__':
w = MyPyOpenGLTest()
w.MainLoop()
|
gpl-2.0
| -7,546,414,518,466,473,000
| 30.670807
| 68
| 0.513434
| false
| 2.707913
| false
| false
| false
|
m1093782566/openstack_org_ceilometer
|
ceilometer/openstack/common/fileutils.py
|
1
|
4033
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import os
import tempfile
from oslo.utils import excutils
from ceilometer.openstack.common import log as logging
LOG = logging.getLogger(__name__)
_FILE_CACHE = {}
def ensure_tree(path):
"""Create a directory (and any ancestor directories required)
:param path: Directory to create
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
def read_cached_file(filename, force_reload=False):
"""Read from a file if it has been modified.
:param force_reload: Whether to reload the file.
    :returns: A tuple of (reloaded, data), where `reloaded` is a boolean
              specifying whether the data was re-read from disk and `data`
              is the cached file contents.
"""
global _FILE_CACHE
if force_reload:
delete_cached_file(filename)
reloaded = False
mtime = os.path.getmtime(filename)
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug("Reloading cached file %s" % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
reloaded = True
return (reloaded, cache_info['data'])
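# Illustrative usage sketch of read_cached_file(); '/etc/example.conf' is a
# placeholder path and not used elsewhere in this module.
def _example_read_cached(path='/etc/example.conf'):
    reloaded, data = read_cached_file(path)
    if reloaded:
        LOG.debug("Cache refreshed for %s" % path)
    return data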
def delete_cached_file(filename):
"""Delete cached file if present.
:param filename: filename to delete
"""
global _FILE_CACHE
if filename in _FILE_CACHE:
del _FILE_CACHE[filename]
def delete_if_exists(path, remove=os.unlink):
"""Delete a file, but ignore file not found error.
:param path: File to delete
:param remove: Optional function to remove passed path
"""
try:
remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
@contextlib.contextmanager
def remove_path_on_error(path, remove=delete_if_exists):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
:param path: File to work with
:param remove: Optional function to remove passed path
"""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
remove(path)
def file_open(*args, **kwargs):
"""Open file
see built-in open() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return open(*args, **kwargs)
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
"""Create temporary file or use existing file.
    This util is needed for creating a temporary file with
specified content, suffix and prefix. If path is not None,
it will be used for writing content. If the path doesn't
exist it'll be created.
:param content: content for temporary file.
:param path: same as parameter 'dir' for mkstemp
:param suffix: same as parameter 'suffix' for mkstemp
:param prefix: same as parameter 'prefix' for mkstemp
For example: it can be used in database tests for creating
configuration files.
"""
if path:
ensure_tree(path)
(fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
try:
os.write(fd, content)
finally:
os.close(fd)
return path
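# Illustrative usage sketch of write_to_tempfile() above; the content and
# suffix are arbitrary examples, and the file is removed again with
# delete_if_exists() so the sketch has no lasting side effects.
def _example_write_config():
    path = write_to_tempfile('[DEFAULT]\nverbose = True\n', suffix='.conf')
    with open(path) as fp:
        content = fp.read()
    delete_if_exists(path)
    return content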
|
apache-2.0
| 1,907,550,172,417,568,500
| 26.435374
| 78
| 0.661294
| false
| 4.08612
| false
| false
| false
|
khughitt/cats
|
test/formatters/test_fasta.py
|
1
|
1635
|
"""
FASTAFormatter tests.
"""
import cats
import io
import os
import pytest
class TestFASTAFormatter:
@pytest.mark.parametrize('filename', [
'dna.fasta',
'dna_grep.fasta',
'dna.txt',
'dna_grep.txt',
'dna_zgrep.txt',
'rna.txt',
'rna_grep.txt',
'protein.fasta'
])
def test_format(self, filename):
"""Test FASTA formatting"""
testdir = os.path.abspath(os.path.join(os.getcwd(),
__file__, '..', '..'))
# input
infile = os.path.join(testdir, 'input', filename)
# output
output = io.StringIO()
cats.format(infile, outbuffer=output, theme='default')
output.seek(0)
with open(os.path.join(testdir, 'output', filename)) as fp:
expected = fp.read()
assert str(output.read()) == expected
@pytest.mark.parametrize('input_file,output_file', [
('dna.fasta', 'dna.fasta-trans')
])
def test_fasta_translate(self, input_file, output_file):
"""Test FASTA DNA->Protein translation"""
testdir = os.path.abspath(os.path.join(os.getcwd(),
__file__, '..', '..'))
# input
infile = os.path.join(testdir, 'input', input_file)
# output
output = io.StringIO()
cats.format(infile, outbuffer=output, theme='default', translate=True)
output.seek(0)
with open(os.path.join(testdir, 'output', output_file)) as fp:
expected = fp.read()
assert str(output.read()) == expected
|
bsd-2-clause
| 1,545,673,285,899,150,600
| 28.196429
| 78
| 0.529052
| false
| 3.811189
| true
| false
| false
|
USDA-ARS-NWRC/AWSF
|
awsm/interface/smrf_ipysnobal.py
|
1
|
12538
|
"""
Functions for running PySnobal as well as SMRF and Pysnobal
threaded together
20170731 Micah Sandusky
"""
from datetime import datetime
import numpy as np
import pandas as pd
import pytz
import smrf.framework
from topocalc.shade import shade
from smrf.envphys import sunang
from smrf.utils import queue
from awsm.interface import ipysnobal, interface, initialize_model as initmodel, \
pysnobal_io as io_mod
from awsm.interface.ingest_data import StateUpdater
from pysnobal.c_snobal import snobal
def run_ipysnobal(myawsm):
"""
Function to run PySnobal from netcdf of ipw forcing data,
not from SMRF instance.
Args:
myawsm: awsm class
"""
# initialize ipysnobal state
# get dem
dem = myawsm.topo.dem
myawsm._logger.info('Initializing from files')
options, params, tstep_info, init, output_rec = \
ipysnobal.init_from_smrf(myawsm, dem=dem)
data_tstep = tstep_info[0]['time_step']
timeSinceOut = 0.0
start_step = 0 # if restart then it would be higher if this were iSnobal
step_time = start_step * data_tstep
output_rec['current_time'] = step_time * \
np.ones(output_rec['elevation'].shape)
output_rec['time_since_out'] = timeSinceOut * \
np.ones(output_rec['elevation'].shape)
myawsm._logger.info('getting inputs for first timestep')
if myawsm.forcing_data_type == 'netcdf':
force = io_mod.open_files_nc(myawsm)
input1 = initmodel.get_timestep_netcdf(
force, options['time']['date_time'][0])
else:
input_list, ppt_list = io_mod.open_files_ipw(myawsm)
input1 = initmodel.get_timestep_ipw(options['time']['date_time'][0],
input_list, ppt_list, myawsm)
# initialize updater if required
if myawsm.update_depth:
updater = StateUpdater(myawsm)
else:
updater = None
myawsm._logger.info('starting PySnobal time series loop')
j = 1
# run PySnobal
for tstep in options['time']['date_time'][1:]:
# for tstep in options['time']['date_time'][953:958]:
myawsm._logger.info('running PySnobal for timestep: {}'.format(tstep))
if myawsm.forcing_data_type == 'netcdf':
input2 = initmodel.get_timestep_netcdf(force, tstep)
else:
input2 = initmodel.get_timestep_ipw(
tstep, input_list, ppt_list, myawsm)
first_step = j
# update depth if necessary
if updater is not None:
if tstep in updater.update_dates:
start_z = output_rec['z_s'].copy()
output_rec = \
updater.do_update_pysnobal(output_rec, tstep)
first_step = 1
rt = snobal.do_tstep_grid(input1, input2, output_rec, tstep_info,
options['constants'], params, first_step=first_step,
nthreads=myawsm.ipy_threads)
if rt != -1:
raise ValueError(
'ipysnobal error on time step %s, pixel %i' % (tstep, rt))
# break
input1 = input2.copy()
# output at the frequency and the last time step
if ((j)*(data_tstep/3600.0) % options['output']['frequency'] == 0) \
or (j == len(options['time']['date_time']) - 1):
myawsm._logger.info('Outputting {}'.format(tstep))
io_mod.output_timestep(output_rec, tstep, options,
myawsm.pysnobal_output_vars)
output_rec['time_since_out'] = np.zeros(
output_rec['elevation'].shape)
myawsm._logger.info('Finished timestep: {}'.format(tstep))
j += 1
# if input has run_for_nsteps, make sure not to go past it
if myawsm.run_for_nsteps is not None:
if j > myawsm.run_for_nsteps:
break
# close input files
if myawsm.forcing_data_type == 'netcdf':
io_mod.close_files(force)
def run_smrf_ipysnobal(myawsm):
"""
    Function to run SMRF and pass outputs in memory to Python-wrapped
iSnobal.
Args:
myawsm: AWSM instance
"""
# first create config to run smrf
smrf_cfg = interface.create_smrf_config(myawsm)
# start = datetime.now()
# initialize
with smrf.framework.SMRF(smrf_cfg, myawsm._logger) as s:
# if input has run_for_nsteps, make sure not to go past it
if myawsm.run_for_nsteps is not None:
change_in_hours = int(myawsm.run_for_nsteps *
s.config['time']['time_step']/60)
# recalculate end_date before initializing run
s.end_date = s.start_date + pd.to_timedelta(change_in_hours - 1,
unit='h')
myawsm.end_date = s.end_date
s.date_time = s.date_time[:myawsm.run_for_nsteps]
s.time_steps = myawsm.run_for_nsteps
# load topo data
s.loadTopo()
# 3. initialize the distribution
s.create_distribution()
# load weather data and station metadata
s.loadData()
# run threaded or not
if s.threading:
run_smrf_ipysnobal_threaded(myawsm, s)
else:
run_smrf_ipysnobal_single(myawsm, s)
s._logger.debug('DONE!!!!')
def run_smrf_ipysnobal_single(myawsm, s):
"""
Running smrf and PySnobal in non-threaded application.
Args:
myawsm: awsm class
s: smrf class
"""
# -------------------------------------
    # Initialize the distribution
s.initialize_distribution()
# -------------------------------------
# initialize ipysnobal state
options, params, tstep_info, init, output_rec = \
ipysnobal.init_from_smrf(myawsm, s)
# -------------------------------------
# create variable list
force_variables = ['thermal', 'air_temp', 'vapor_pressure', 'wind_speed',
'net_solar', 'soil_temp', 'precip', 'percent_snow',
'snow_density', 'precip_temp']
# Collect the potential output variables
possible_output_variables = {}
for variable, module in s.distribute.items():
possible_output_variables.update(module.output_variables)
variable_list = {}
for force_variable in force_variables:
if force_variable in possible_output_variables.keys():
module = possible_output_variables[force_variable]['module']
variable_list[force_variable] = {
'variable': force_variable,
'module': module
}
else:
raise ValueError('Not distributing necessary '
'variables to run PySnobal!')
# -------------------------------------
# initialize updater if required
if myawsm.update_depth:
updater = StateUpdater(myawsm)
else:
updater = None
# initialize pysnobal run class
my_pysnobal = ipysnobal.PySnobal(s.date_time,
variable_list,
myawsm.pysnobal_output_vars,
options,
params,
tstep_info,
init,
output_rec,
s.topo.nx,
s.topo.ny,
myawsm.soil_temp,
myawsm._logger,
myawsm.tzinfo)
# -------------------------------------
# Distribute the data
for output_count, t in enumerate(s.date_time):
# wait here for the model to catch up if needed
startTime = datetime.now()
s._logger.info('Distributing time step %s' % t)
# 0.1 sun angle for time step
cosz, azimuth, rad_vec = sunang.sunang(
t.astimezone(pytz.utc),
s.topo.basin_lat,
s.topo.basin_long,
)
# 0.2 illumination angle
illum_ang = None
if cosz > 0:
illum_ang = shade(
s.topo.sin_slope,
s.topo.aspect,
azimuth,
cosz)
# 1. Air temperature
s.distribute['air_temp'].distribute(s.data.air_temp.loc[t])
# 2. Vapor pressure
s.distribute['vapor_pressure'].distribute(
s.data.vapor_pressure.loc[t],
s.distribute['air_temp'].air_temp)
# 3. Wind_speed and wind_direction
s.distribute['wind'].distribute(
s.data.wind_speed.loc[t],
s.data.wind_direction.loc[t],
t)
# 4. Precipitation
s.distribute['precipitation'].distribute(
s.data.precip.loc[t],
s.distribute['vapor_pressure'].dew_point,
s.distribute['vapor_pressure'].precip_temp,
s.distribute['air_temp'].air_temp,
t,
s.data.wind_speed.loc[t],
s.data.air_temp.loc[t],
s.distribute['wind'].wind_direction,
s.distribute['wind'].wind_model.dir_round_cell,
s.distribute['wind'].wind_speed,
s.distribute['wind'].wind_model.cellmaxus
)
# 5. Albedo
s.distribute['albedo'].distribute(
t,
illum_ang,
s.distribute['precipitation'].storm_days
)
# 6. cloud factor
s.distribute['cloud_factor'].distribute(s.data.cloud_factor.loc[t])
# 7. solar
s.distribute['solar'].distribute(
t,
s.distribute["cloud_factor"].cloud_factor,
illum_ang,
cosz,
azimuth,
s.distribute['albedo'].albedo_vis,
s.distribute['albedo'].albedo_ir)
        # 8. thermal radiation
if s.distribute['thermal'].gridded and \
s.config['gridded']['data_type'] != 'hrrr_grib':
s.distribute['thermal'].distribute_thermal(
s.data.thermal.loc[t],
s.distribute['air_temp'].air_temp)
else:
s.distribute['thermal'].distribute(
t,
s.distribute['air_temp'].air_temp,
s.distribute['vapor_pressure'].vapor_pressure,
s.distribute['vapor_pressure'].dew_point,
s.distribute['cloud_factor'].cloud_factor)
        # 9. Soil temperature
s.distribute['soil_temp'].distribute()
        # 10. pass info to PySnobal
if output_count == 0:
my_pysnobal.run_single_fist_step(s)
elif output_count > 0:
my_pysnobal.run_single(t, s, updater)
else:
raise ValueError('Problem with times in run ipysnobal single')
telapsed = datetime.now() - startTime
s._logger.debug('{0:.2f} seconds for time step'
.format(telapsed.total_seconds()))
s.forcing_data = 1
def run_smrf_ipysnobal_threaded(myawsm, s):
"""
    Function to run SMRF (threaded) and pass outputs in memory to Python-wrapped
    iSnobal. iPySnobal has replaced the output queue in this implementation.
Args:
myawsm: AWSM instance
s: SMRF instance
"""
# initialize ipysnobal state
options, params, tstep_info, init, output_rec = \
ipysnobal.init_from_smrf(myawsm, s)
s.create_data_queue()
s.set_queue_variables()
s.create_distributed_threads(['isnobal'])
s.smrf_queue['isnobal'] = queue.DateQueueThreading(
s.queue_max_values,
s.time_out,
name='isnobal')
del s.smrf_queue['output']
# initialize updater if required
if myawsm.update_depth:
updater = StateUpdater(myawsm)
else:
updater = None
# isnobal thread
s.threads.append(ipysnobal.QueueIsnobal(
s.smrf_queue,
s.date_time,
s.thread_queue_variables,
myawsm.pysnobal_output_vars,
options,
params,
tstep_info,
init,
output_rec,
s.topo.nx,
s.topo.ny,
myawsm.soil_temp,
myawsm._logger,
myawsm.tzinfo,
updater))
# the cleaner
s.threads.append(queue.QueueCleaner(s.date_time, s.smrf_queue))
# start all the threads
for i in range(len(s.threads)):
s.threads[i].start()
for i in range(len(s.threads)):
s.threads[i].join()
|
gpl-3.0
| -4,230,425,102,818,562,600
| 30.822335
| 86
| 0.544664
| false
| 3.702894
| false
| false
| false
|
XianliangJ/collections
|
DCTCPTest/plot_queue.py
|
1
|
2255
|
'''
Plot queue occupancy over time
'''
from helper import *
import plot_defaults
from matplotlib.ticker import MaxNLocator
from pylab import figure
parser = argparse.ArgumentParser()
parser.add_argument('--files', '-f',
help="Queue timeseries output to one plot",
required=True,
action="store",
nargs='+',
dest="files")
parser.add_argument('--legend', '-l',
help="Legend to use if there are multiple plots. File names used as default.",
action="store",
nargs="+",
default=None,
dest="legend")
parser.add_argument('--out', '-o',
help="Output png file for the plot.",
default=None, # Will show the plot
dest="out")
parser.add_argument('--labels',
help="Labels for x-axis if summarising; defaults to file names",
required=False,
default=[],
nargs="+",
dest="labels")
parser.add_argument('--every',
help="If the plot has a lot of data points, plot one of every EVERY (x,y) point (default 1).",
default=1,
type=int)
args = parser.parse_args()
if args.legend is None:
args.legend = []
for file in args.files:
args.legend.append(file)
to_plot=[]
def get_style(i):
if i == 0:
return {'color': 'red'}
else:
return {'color': 'blue', 'ls': '-.'}
m.rc('figure', figsize=(16, 6))
fig = figure()
ax = fig.add_subplot(111)
for i, f in enumerate(args.files):
data = read_list(f)
xaxis = map(float, col(0, data))
start_time = xaxis[0]
xaxis = map(lambda x: x - start_time, xaxis)
qlens = map(float, col(1, data))
xaxis = xaxis[::args.every]
qlens = qlens[::args.every]
ax.plot(xaxis, qlens, lw=2, **get_style(i))
ax.xaxis.set_major_locator(MaxNLocator(4))
plt.legend(args.legend)
plt.ylabel("Queue occupancy (bytes)")
plt.grid(True)
plt.xlabel("Time elapsed (in sec)")
if args.out:
print 'saving to', args.out
plt.savefig(args.out)
else:
plt.show()
|
gpl-3.0
| 1,796,765,571,080,518,000
| 26.839506
| 114
| 0.534812
| false
| 3.770903
| false
| false
| false
|
diggcoin/diggcoin
|
qa/rpc-tests/p2p-acceptblock.py
|
1
|
12336
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is missing the height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("DIGGCOIND", "diggcoind"),
help="bitcoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in xrange(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print "First height 2 block accepted by both nodes"
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in xrange(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print "Second height 2 block accepted only from whitelisted peer"
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in xrange(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print "Unrequested more-work block accepted from non-whitelisted peer"
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print "Successfully reorged to length 3 chain from whitelisted peer"
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in xrange(2):
for i in xrange(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print "Unrequested block too far-ahead not processed"
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print "Unrequested block far ahead of tip accepted from whitelisted peer"
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print "Unrequested block that would complete more-work chain was ignored"
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print "Inv at tip triggered getdata for unprocessed block"
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print "Successfully reorged to longer chain from non-whitelisted peer"
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
|
mit
| -6,203,074,944,724,320,000
| 41.537931
| 107
| 0.634484
| false
| 3.912464
| true
| false
| false
|
hunter-87/binocular-dense-stereo
|
sfm_templeRing.py
|
1
|
4749
|
import cv2
import numpy as np
import scipy.spatial
#!/usr/bin/env python
#coding: utf8
import os
from matplotlib.pyplot import subplot
import matplotlib.pyplot as plt
# figsize(12,8)
T1 = cv2.imread('../dataset_templeRing/templeR0034.png', cv2.IMREAD_GRAYSCALE)
sift = cv2.SIFT(nfeatures=5000)
kpts1, D_i = sift.detectAndCompute(T1, mask=None)
K1 = np.array([[k.pt[0], k.pt[1]] for k in kpts1])
T2 = cv2.imread('../dataset_templeRing/templeR0036.png', cv2.IMREAD_GRAYSCALE)
sift = cv2.SIFT(nfeatures=5000)
kpts2, D_j = sift.detectAndCompute(T2, mask=None)
K2 = np.array([[k.pt[0], k.pt[1]] for k in kpts2])
subplot(1,2,1)
plt.plot(K1[:,0], K1[:,1], 'rx')
plt.imshow(T1, cmap=plt.cm.gray)
plt.title('Temple 34')
subplot(1,2,2)
plt.plot(K2[:,0], K2[:,1], 'rx')
plt.imshow(T2, cmap=plt.cm.gray)
plt.title('Temple 36')
from sklearn.decomposition import PCA
pca = PCA(n_components=10)
pca.fit(D_i)
D_i = pca.transform(D_i)
D_j = pca.transform(D_j)
import scipy.spatial
kdtree_j = scipy.spatial.cKDTree(D_j)
N_i = D_i.shape[0]
d, nn = kdtree_j.query(D_i, k=2)
ratio_mask = d[:,0]/d[:,1] < 0.6
m = np.vstack((np.arange(N_i), nn[:,0])).T
m = m[ratio_mask]
# Filtering: If more than one feature in I matches the same feature in J,
# we remove all of these matches
h = {nj:0 for nj in m[:,1]}
for nj in m[:,1]:
h[nj] += 1
m = np.array([(ni, nj) for ni, nj in m if h[nj] == 1])
def rcolor():
return (np.random.rand(),np. random.rand(), np.random.rand())
def show_matches(matches):
n_rows, n_cols = T1.shape
display = np.zeros( (n_rows, 2 * n_cols), dtype=np.uint8 )
display[:,0:n_cols] = T1
display[:,n_cols:] = T2
for pi, pj in matches:
plt.plot([K1[pi][0], K2[pj][0] + n_cols],
[K1[pi][1], K2[pj][1]],
marker='o', linestyle='-', color=rcolor())
plt.imshow(display, cmap=plt.cm.gray)
show_matches(m)
xi = K1[m[:,0],:]
xj = K2[m[:,1],:]
F, status = cv2.findFundamentalMat(xi, xj, cv2.FM_RANSAC, 0.5, 0.9)
assert(abs(np.linalg.det(F)) < 1.e-7)
is_inlier = np.array(status == 1).reshape(-1)
inlier_i = xi[is_inlier]
inlier_j = xj[is_inlier]
hg = lambda x : np.array([x[0], x[1], 1])
K = np.array([[1520.4, 0., 302.32],
[0, 1525.9, 246.87],
[0, 0, 1]])
E = np.dot(K.T, np.dot(F, K))
U, s, VT = np.linalg.svd(E)
if np.linalg.det(np.dot(U, VT)) < 0:
VT = -VT
E = np.dot(U, np.dot(np.diag([1,1,0]), VT))
V = VT.T
# Let's check Nister (2004) Theorem 3 constraint:
assert(np.linalg.det(U) > 0)
assert(np.linalg.det(V) > 0)
# Nister (2004) Theorem 2 ("Essential Condition")
assert np.abs(np.dot(E, np.dot(E.T, E)) - 0.5 * np.trace(np.dot(E, E.T)) * E).max() < 1.0e-10
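# Equivalent check written as a small helper (an illustrative sketch, not part
# of the original script). A valid essential matrix also has two equal singular
# values and one zero singular value, which the U diag(1,1,0) V^T
# reconstruction above enforces by construction.
def is_essential(M, tol=1.0e-8):
    residual = np.dot(M, np.dot(M.T, M)) - 0.5 * np.trace(np.dot(M, M.T)) * M
    return np.abs(residual).max() < tol
assert is_essential(E)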
def dlt_triangulation(ui, Pi, uj, Pj):
"""Hartley & Zisserman, 12.2"""
ui /= ui[2]
xi, yi = ui[0], ui[1]
uj /= uj[2]
xj, yj = uj[0], uj[1]
a0 = xi * Pi[2,:] - Pi[0,:]
a1 = yi * Pi[2,:] - Pi[1,:]
a2 = xj * Pj[2,:] - Pj[0,:]
a3 = yj * Pj[2,:] - Pj[1,:]
A = np.vstack((a0, a1, a2, a3))
U, s, VT = np.linalg.svd(A)
V = VT.T
X3d = V[:,-1]
return X3d/X3d[3]
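# Quick synthetic sanity check of the DLT above (illustrative only; the two
# cameras and the 3D point are made up). A point triangulated from its own
# projections should be recovered up to the homogeneous scale.
_Pi = np.hstack((np.identity(3), np.zeros((3, 1))))
_Pj = np.hstack((np.identity(3), np.array([[0.1], [0.0], [0.0]])))
_X = np.array([0.2, -0.1, 2.0, 1.0])
_ui, _uj = np.dot(_Pi, _X), np.dot(_Pj, _X)
assert np.allclose(dlt_triangulation(_ui, _Pi, _uj, _Pj), _X, atol=1e-6)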
def depth(X, P):
T = X[3]
M = P[:,0:3]
p4 = P[:,3]
m3 = M[2,:]
x = np.dot(P, X)
w = x[2]
X = X/w
return (np.sign(np.linalg.det(M)) * w) / (T*np.linalg.norm(m3))
def get_proj_matrices(E, K, xi, xj):
hg = lambda x : np.array([x[0], x[1], 1])
W = np.array([[0., -1., 0.],
[1., 0., 0.],
[0., 0., 1.]])
Pi = np.dot(K, np.hstack( (np.identity(3), np.zeros((3,1))) ))
U, s, VT = np.linalg.svd(E)
u3 = U[:,2].reshape(3,1)
# Candidates
Pa = np.dot(K, np.hstack((np.dot(U, np.dot(W ,VT)), u3)))
Pb = np.dot(K, np.hstack((np.dot(U, np.dot(W ,VT)), -u3)))
Pc = np.dot(K, np.hstack((np.dot(U, np.dot(W.T ,VT)), u3)))
Pd = np.dot(K, np.hstack((np.dot(U, np.dot(W.T ,VT)), -u3)))
# Find the camera for which the 3D points are *in front*
xxi, xxj = hg(xi[0]), hg(xj[0])
Pj = None
for Pk in [Pa, Pb, Pc, Pd]:
Q = dlt_triangulation(xxi, Pi, xxj, Pk)
if depth(Q, Pi) > 0 and depth(Q, Pk) > 0:
Pj = Pk
break
assert(Pj is not None)
return Pi, Pj
P1, P2 = get_proj_matrices(E, K, inlier_i, inlier_j)
X = []
for xxi, xxj in zip(inlier_i, inlier_j):
X_k = dlt_triangulation(hg(xxi), P1, hg(xxj), P2)
X.append(X_k)
X = np.array(X)
num_pix = X.shape[0]
pix_color = [rcolor() for k in range(num_pix)]
pix = np.dot(P2, X.T).T
pix = np.divide(pix, pix[:,2].reshape(num_pix, -1))
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
subplot(1,2,1)
for k in range(num_pix):
plt.plot(pix[k,0], pix[k,1], color=pix_color[k], marker='o')
plt.imshow(T1, cmap=plt.cm.gray)
ax = fig.add_subplot(1, 2, 2, projection='3d')
ax.scatter(X[:,0], X[:,1], X[:,2], zdir='z', c=pix_color)
|
gpl-2.0
| -5,787,413,777,145,128,000
| 25.836158
| 84
| 0.562855
| false
| 2.189488
| false
| false
| false
|
jeffpiazza/derbynet
|
extras/scripts/lib/read_barcode.py
|
1
|
2226
|
#!/usr/bin/env python
import struct
import sys
scanner_device = sys.argv[1]
# Values taken from include/uapi/linux/input-event-codes.h
keys = {
2: '1', 3: '2', 4: '3', 5: '4', 6: '5', 7: '6', 8: '7', 9: '8', 10: '9', 11: '0', \
12: '-', 13: '=', \
16: 'q', 17: 'w', 18: 'e', 19: 'r', 20: 't', 21: 'y', 22: 'u', 23: 'i', 24: 'o', 25: 'p', \
26: '[', 27: ']', \
30: 'a', 31: 's', 32: 'd', 33: 'f', 34: 'g', 35: 'h', 36: 'j', 37: 'k', 38: 'l', 39: ';', \
40: '\'', 43: '\\', \
44: 'z', 45: 'x', 46: 'c', 47: 'v', 48: 'b', 49: 'n', 50: 'm', 51: ',', 52: '.', 53: '/', \
57: ' ' }
shift_keys = {
2: '!', 3: '@', 4: '#', 5: '$', 6: '%', 7: '^', 8: '&', 9: '*', 10: '(', 11: ')', \
12: '_', 13: '+', \
16: 'Q', 17: 'W', 18: 'E', 19: 'R', 20: 'T', 21: 'Y', 22: 'U', 23: 'I', 24: 'O', 25: 'P', \
26: '{', 27: '}', \
30: 'A', 31: 'S', 32: 'D', 33: 'F', 34: 'G', 35: 'H', 36: 'J', 37: 'K', 38: 'L', 39: ';', \
40: '\"', 43: '|', \
44: 'Z', 45: 'X', 46: 'C', 47: 'V', 48: 'B', 49: 'N', 50: 'M', 51: '<', 52: '>', 53: '?', \
57: ' ' }
KEY_ENTER = 28
KEY_LEFTSHIFT = 42
KEY_RIGHTSHIFT = 54
EV_VALUE_KEY_RELEASED = 0
EV_VALUE_KEY_PRESSED = 1
EV_VALUE_KEY_AUTOREPEAT = 2
EV_KEY = 1
# EV_SYN = 0
# EV_MSC = 4
# 4IHHI on 64-bit machines; each of the other INEV_ indices would increase by 2
INEV_STRUCT = '2IHHI'
# Offsets in the input_event struct
#INEV_XX0 = 0
#INEV_XX1 = 1
INEV_TYPE = 2
INEV_CODE = 3
INEV_VALUE = 4
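# Illustrative shape of one unpacked event (values are hypothetical):
#   struct.unpack(INEV_STRUCT, buf) -> (1614300000, 123456, 1, 30, 1)
# i.e. (tv_sec, tv_usec, type=EV_KEY, code=30 -> 'a', value=KEY_PRESSED),
# which is why INEV_TYPE, INEV_CODE and INEV_VALUE index positions 2, 3 and 4.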
ss = ""
with open(scanner_device, 'rb') as fp:
shift = False
done = False
while not done:
buffer = fp.read(struct.calcsize(INEV_STRUCT))
ev = struct.unpack(INEV_STRUCT, buffer)
if ev[INEV_TYPE] != EV_KEY:
continue
is_keypress = ev[INEV_VALUE] == EV_VALUE_KEY_PRESSED or \
ev[INEV_VALUE] == EV_VALUE_KEY_AUTOREPEAT
# print ev
if ev[INEV_CODE] == KEY_LEFTSHIFT or ev[INEV_CODE] == KEY_RIGHTSHIFT:
shift = is_keypress
elif is_keypress:
if ev[INEV_CODE] == KEY_ENTER:
done = True
elif shift and ev[INEV_CODE] in shift_keys:
ss += shift_keys[ev[INEV_CODE]]
elif ev[INEV_CODE] in keys:
ss += keys[ev[INEV_CODE]]
print ss
|
mit
| -1,319,456,822,116,887,600
| 29.081081
| 91
| 0.469901
| false
| 2.403888
| false
| false
| false
|
kubeflow/kfserving
|
docs/samples/v1beta1/triton/torchscript/image_transformer_v2/image_transformer_v2.py
|
1
|
1981
|
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfserving
from typing import Dict
from PIL import Image
import torchvision.transforms as transforms
import logging
import io
import numpy as np
import base64
logging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL)
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
def image_transform(instance):
byte_array = base64.b64decode(instance['image_bytes']['b64'])
image = Image.open(io.BytesIO(byte_array))
a = np.asarray(image)
im = Image.fromarray(a)
res = transform(im)
logging.info(res)
return res.tolist()
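# Illustrative request payload this transformer expects (the base64 string is a
# placeholder, not real image data):
#
#   {
#       "instances": [
#           {"image_bytes": {"b64": "<base64-encoded image>"}}
#       ]
#   }
#
# Each instance is decoded, run through the torchvision transform above and
# forwarded to the predictor under the declared 1x3x32x32 FP32 input.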
class ImageTransformerV2(kfserving.KFModel):
def __init__(self, name: str, predictor_host: str, protocol: str):
super().__init__(name)
self.predictor_host = predictor_host
self.protocol = protocol
def preprocess(self, inputs: Dict) -> Dict:
return {
'inputs': [
{
'name': 'INPUT__0',
'shape': [1, 3, 32, 32],
'datatype': "FP32",
'data': [image_transform(instance) for instance in inputs['instances']]
}
]
}
def postprocess(self, results: Dict) -> Dict:
return {output["name"]: np.array(output["data"]).reshape(output["shape"]).tolist()
for output in results["outputs"]}
|
apache-2.0
| -6,574,397,262,375,359,000
| 31.47541
| 90
| 0.647148
| false
| 3.861598
| false
| false
| false
|
ipselium/cpyvke
|
cpyvke/objects/panel.py
|
1
|
28277
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © 2016-2018 Cyril Desjouy <ipselium@free.fr>
#
# This file is part of cpyvke
#
# cpyvke is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cpyvke is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with cpyvke. If not, see <http://www.gnu.org/licenses/>.
#
#
# Creation Date : Mon Nov 14 09:08:25 2016
# Last Modified : mar. 03 avril 2018 15:55:45 CEST
"""
-----------
DOCSTRING
@author: Cyril Desjouy
"""
import time
import locale
import curses
import abc
from curses import panel
from math import ceil
from cpyvke.curseswin.widgets import Help
from cpyvke.curseswin.prompt import Prompt
from cpyvke.curseswin.app import check_size
from cpyvke.utils.kd import restart_daemon
from cpyvke.utils.display import format_cell
from cpyvke.utils.comm import send_msg
code = locale.getpreferredencoding()
class BasePanel(abc.ABC):
""" Generic Panel.
"""
def __init__(self, app, sock, logger):
""" Class Constructor """
# Instance arguments
self.app = app
self.sock = sock
self.logger = logger
# Init constants
self.resize = False
self.pkey = -1
# Init Prompt
self.prompt = Prompt(self.app)
self.prompt_time = 0
self.prompt_msg = ''
# Update dimensions
self.screen_height, self.screen_width = self.app.stdscr.getmaxyx() # Local dimensions
# Init subwin
self.gwin = self.app.stdscr.subwin(self.app.panel_height, self.app.screen_width, 0, 0)
self.gwin.keypad(1)
# Init Panel
self.gpan = panel.new_panel(self.gwin)
self.gpan.hide()
@property
@abc.abstractmethod
def title(self):
""" Panel title. Must return a string """
@property
@abc.abstractmethod
def panel_name(self):
""" Panel reference name. Must return a string """
@abc.abstractmethod
def color(self, item):
""" Panel colors. Required :
* for BasePanel : 'txt', 'bdr', 'ttl', 'hh', 'pwf'
* for ListPanel : 'co', 'al', 'di'
"""
@abc.abstractmethod
def fill_main_box(self):
""" Fill the main box """
def display(self):
""" Display the panel. """
try:
self.pkey = -1
while self.app.close_signal == 'continue':
self.tasks()
self.app.shutdown()
except Exception:
self.app.exit_with_error()
@check_size
def tasks(self):
""" List of tasks at each iteration """
self.resize_curses()
# Check switch panel
if self.app.explorer_switch:
self.app.explorer_switch = False
self.app.kernel_win.display()
self.resize_curses(True)
elif self.app.kernel_switch:
self.app.kernel_switch = False
self.app.explorer_win.display()
self.resize_curses(True)
else:
# Check Connection to daemon
self.sock.check_main_socket()
# Keys
self.common_key_bindings()
# Decrease delay right here to avoid some waiting at the getch when not
# in switch mode. If switch, the halfdelay is set to its normal value
# just after, in the refresh() method !
curses.halfdelay(1)
# Skip end of tasks if switching panel !
if not self.app.explorer_switch and not self.app.kernel_switch and self.app.close_signal == "continue":
# Update screen size if another menu break because of resizing
self.resize_curses()
# Update all static panels
self.refresh()
# Get pressed key (even in case of switch)
self.pkey = self.app.stdscr.getch()
def refresh(self):
""" Refresh all objects. """
# Erase all windows
self.gwin.erase()
self.app.stdscr.erase()
# Create border before updating fields
self.gwin.border(0)
# Fill the main box !
self.fill_main_box()
# Update all windows
if self.app.debug:
self.app.dbg_pad(self.pkey)
# Update infos -- Bottom
self.app.status_bar()
self.prompt_msg_display()
self.app.stdscr.refresh()
self.gwin.refresh()
# Reactive timeout for getch
curses.halfdelay(self.app.curse_delay)
def common_key_bindings(self):
""" Common key bindings """
# Custom key bindings
self.custom_key_bindings()
# Socket actions
self.socket_key_bindings()
# Item list
self.list_key_bindings()
# Menu Help
if self.pkey == 63: # -> ?
help_menu = Help(self.app)
help_menu.display()
# Prompt
elif self.pkey == 58: # -> :
self.cmd = self.prompt.with_completion(chr(self.pkey))
self.prompt_cmd()
# Send code
elif self.pkey == 120: # -> x
self.send_code()
# Debug Pad
elif self.pkey == 100: # -> d
self.toggle_debug()
def socket_key_bindings(self):
""" Socket actions key bindings. """
if self.pkey == 82: # -> R
self.daemon_restart_connection()
elif self.pkey == 68: # -> D
self.daemon_disconnect()
elif self.pkey == 67: # -> C
self.daemon_connect()
elif self.pkey == 18: # -> c-r
self.daemon_restart()
def list_key_bindings(self):
""" Not available for BasePanel. See List ListPanel """
pass
def custom_key_bindings(self):
""" Key bindings : To overload """
pass
def prompt_msg_display(self):
""" Erase prompt message after some delay """
if self.prompt_msg and time.time() - self.prompt_time > 3:
self.prompt_msg = ''
else:
self.prompt.display(self.prompt_msg)
def prompt_msg_setup(self, msg):
""" Set up the message to display in the prompt """
self.prompt_msg = msg
self.prompt_time = time.time()
def prompt_cmd(self):
""" Actions for prompt """
if not self.cmd:
pass
elif self.cmd in ["q", "quit"]:
self.app.close_signal = 'close'
elif self.cmd in ["Q", "Quit"]:
self.app.close_signal = 'shutdown'
elif self.cmd in ['k', 'K', 'kernel-manager']:
self.prompt_cmd_kernel_manager()
elif self.cmd in ['v', 'V', 'e', 'E', 'variable-explorer']:
self.prompt_cmd_variable_explorer()
elif self.cmd in ['h', 'help']:
help_menu = Help(self.app)
help_menu.display()
elif self.cmd in ['R', 'daemon-restart']:
self.daemon_restart()
elif self.cmd in ['r', 'daemon-restart-connection']:
self.daemon_restart_connection()
elif self.cmd in ['c', 'daemon-connect']:
self.daemon_connect()
elif self.cmd in ['d', 'daemon-disconnect']:
self.daemon_disconnect()
elif self.cmd in ['toggle-debug']:
self.toggle_debug()
else:
self.prompt_msg_setup('Command not found !')
def prompt_cmd_kernel_manager(self):
""" 'kernel-manager' prompt command"""
if self.panel_name in ['variable-explorer']:
self.app.explorer_switch = True
elif self.panel_name not in ['kernel-manager']:
self.app.kernel_win.display()
else:
self.prompt_msg_setup('Already in kernel manager !')
def prompt_cmd_variable_explorer(self):
""" 'variable-explorer' prompt command """
if self.panel_name in ['kernel-manager']:
self.app.kernel_switch = True
elif self.panel_name not in ['variable-explorer']:
self.app.explorer_win.display()
else:
self.prompt_msg_setup('Already in variable explorer !')
def toggle_debug(self):
""" Display/hide debug informations """
if self.app.debug:
self.app.debug = False
else:
self.app.debug = True
def send_code(self):
""" Send code to current kernel """
code = self.prompt.simple('Send-code ')
code, err = self.check_code(code)
if err:
self.prompt_msg_setup(err)
elif code:
try:
send_msg(self.sock.RequestSock, '<code>' + code)
self.logger.info('Code sent to kernel : {}'.format(code))
self.prompt_msg_setup('Code sent !')
except Exception:
self.logger.error('Code not sent !')
self.prompt_msg_setup('Code not sent !')
@staticmethod
def check_code(code):
""" Check is code is authorized """
if 'input' in code:
return '', 'input command is not allowed'
elif 'reset' in code:
return 'reset -f', 'Resetting namespace...'
else:
return code, None
def daemon_connect(self):
""" Connect to daemon socket """
self.sock.init_sockets()
self.sock.warning_socket(self.app.wng)
def daemon_disconnect(self):
""" Disconnet from daemon socket """
self.sock.close_sockets()
self.sock.warning_socket(self.app.wng)
def daemon_restart_connection(self):
""" Restart connection to the daemon socket """
self.app.wng.display(' Restarting connection ')
self.sock.restart_sockets()
self.sock.warning_socket(self.app.wng)
def daemon_restart(self):
""" Restart kd5 ! """
restart_daemon()
self.app.wng.display(' Restarting Daemon ')
self.sock.init_sockets()
self.sock.warning_socket(self.app.wng)
def resize_curses(self, force=False):
""" Check if terminal is resized and adapt screen """
# Check difference between self.screen_height and self.app.screen_height
resize = curses.is_term_resized(self.screen_height, self.screen_width)
if resize or force:
# Save these values locally so a later resize can be detected
self.screen_height, self.screen_width = self.app.stdscr.getmaxyx()
# Update display
self.app.stdscr.clear()
self.gwin.clear()
self.gwin.resize(self.app.panel_height, self.app.screen_width)
curses.resizeterm(self.app.screen_height, self.app.screen_width)
self.app.stdscr.refresh()
self.gwin.refresh()
class ListPanel(BasePanel):
""" Generic Panel for lists with menu.
"""
def __init__(self, app, sock, logger):
""" Class Constructor """
super(ListPanel, self).__init__(app, sock, logger)
# Some variables
self.filter = None
self.mk_sort = 'name'
self.search = None
self.search_index = 0
self.search_lst = []
self.limit_msg = ''
self.position = 0
self.page = 1
# Init variables :
self.item_dic = {}
self.item_keys = []
@property
@abc.abstractmethod
def empty(self):
""" Text for empty list. Must return a string """
return
def display(self):
""" Display the panel. """
# Init colors
self.gwin.bkgd(self.color('txt'))
self.gwin.attrset(self.color('bdr'))
self.gpan.top()        # Push the panel to the top of the stack.
self.gpan.show() # Display the panel
self.gwin.clear()
# Update size if it has change when panel was hidden
self.resize_curses(True)
self.pkey = -1
while self.pkey not in self.app.kquit and self.app.close_signal == 'continue':
if self.app.kernel_switch or self.app.explorer_switch:
break
self.tasks()
self.gwin.clear()
self.gpan.hide()
def custom_tasks(self):
""" Supplementary tasks [To overload if needed] """
pass
@check_size
def tasks(self):
""" List of tasks at each iteration """
# Listen to resize and adapt Curses
self.resize_curses()
# Custom tasks
self.custom_tasks()
# Check Connection to daemon
self.sock.check_main_socket()
# Get items
self.item_dic = self.get_items()
self.row_num = len(self.item_dic) - 1
# Arange item list
self.arange_lst()
# Key bindings
self.common_key_bindings()
if not self.app.kernel_switch and not self.app.explorer_switch and self.app.close_signal == "continue":
# Navigate in the variable list window
self.navigate_lst()
# Update screen size
self.resize_curses()
# Update all
self.refresh()
# Get key
self.pkey = self.app.stdscr.getch()
def refresh(self):
""" Refresh all objects. """
# Erase all windows
self.gwin.erase()
self.app.stdscr.erase()
# Create border before updating fields
self.gwin.border(0)
# Fill the main box !
self.fill_main_box()
# Update all windows
if self.app.debug:
self.app.dbg_pad(self.pkey, self.search, self.filter, self.mk_sort)
# Update infos -- Bottom
self.app.status_bar()
self.prompt_msg_display()
self.app.stdscr.refresh()
self.gwin.refresh()
# Reactive timeout for getch
curses.halfdelay(self.app.curse_delay)
def list_key_bindings(self):
""" Actions linked to list of item. """
# Menu Search
if self.pkey == 47: # -> /
self.search_item('Search for : ')
# Next item (search)
if self.pkey == 110: # -> n
self.search_item_next()
# Sort variable by name/type
elif self.pkey == 115: # -> s
if self.mk_sort == 'name':
self.mk_sort = 'type'
elif self.mk_sort == 'type':
self.mk_sort = 'name'
self.arange_lst()
# Filter variables
elif self.pkey == 102: # -> f
self.filter = self.prompt.simple('Limit to : ')
if self.filter:
self.mk_sort = 'filter'
self.position = 0
self.page = 1
self.arange_lst()
else:
self.filter = None
# Reinit
elif self.pkey == 117: # -> u
self.mk_sort = 'name'
self.limit_msg = ''
self.position = 0
self.page = 1
self.arange_lst()
# Panel Menu
elif self.pkey in self.app.kenter and self.row_num != -1:
self.init_menu()
def custom_key_bindings(self):
""" Key bindings : To overload """
pass
@abc.abstractmethod
def get_items(self):
""" Return a dicionnary with items : self.item_dic """
return
def fill_main_box(self):
""" Update the item list """
# Title
if self.app.config['font']['pw-font'] == 'True':
self.gwin.addstr(0, int((self.app.screen_width-len(self.title))/2),
'', self.color('pwf'))
self.gwin.addstr(self.title, self.color('ttl'))
self.gwin.addstr('', self.color('pwf'))
else:
self.gwin.addstr(0, int((self.app.screen_width-len(self.title))/2),
'|' + self.title + '|', self.color('ttl'))
# Reset position if it is greater than the length of the new item list
self.row_num = len(self.item_keys) - 1
if self.position > self.row_num:
self.position = 0
self.page = 1
# Items
for i in range(self.app.row_max*(self.page-1),
self.app.row_max + self.app.row_max*(self.page-1)):
if self.row_num == -1:
self.gwin.addstr(1, 1, self.empty, self.color('hh'))
elif i <= self.row_num:
self.cell1, self.cell2 = format_cell(self.item_dic, self.item_keys[i], self.app.screen_width)
if i == self.position:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), 2,
self.cell1.encode(code), self.color('hh'))
self.fill_main_box_type_selected(i)
else:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), 2,
self.cell1.encode(code), curses.A_DIM | self.color('txt'))
self.fill_main_box_type(i)
# Bottom info
if self.app.config['font']['pw-font'] == 'True' and len(self.limit_msg) > 0:
self.gwin.addstr(self.app.panel_height-1,
int((self.app.screen_width-len(self.limit_msg))/2),
'', self.color('pwf'))
self.gwin.addstr(self.limit_msg, self.color('ttl'))
self.gwin.addstr('', self.color('pwf'))
elif len(self.limit_msg) > 0:
self.gwin.addstr(self.app.panel_height-1,
int((self.app.screen_width-len(self.limit_msg))/2),
'< ' + self.limit_msg + ' >', curses.A_DIM | self.color('ttl'))
self.app.stdscr.refresh()
self.gwin.refresh()
def fill_main_box_type_selected(self, i):
if "[Died]" in self.cell2:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('di'))
elif "[Alive]" in self.cell2:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('al'))
elif "[Connected]" in self.cell2:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('co'))
else:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('hh'))
def fill_main_box_type(self, i):
if "[Died]" in self.cell2:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('di'))
elif "[Alive]" in self.cell2:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('al'))
elif "[Connected]" in self.cell2:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('co'))
else:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('txt'))
@staticmethod
def filter_var_lst(item_dic, filt):
""" Filter variable list (name|type). """
filtered = []
for key in list(item_dic):
if filt in item_dic[key]['type'] or filt in key:
filtered.append(key)
return sorted(filtered)
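# Illustrative behaviour (hypothetical data): a key is kept when the filter
# string appears in either its name or its type, e.g.
#   filter_var_lst({'a': {'type': 'int'}, 'b': {'type': 'str'}}, 'int') -> ['a']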
@staticmethod
def type_sort(item_dic):
""" Sort variable by type. """
from operator import itemgetter
types = []
for key in list(item_dic):
types.append([key, item_dic[key]['type']])
types.sort(key=itemgetter(1))
return [item[0] for item in types]
def arange_lst(self):
""" Organize/Arange variable list. """
if self.mk_sort == 'name':
self.item_keys = sorted(list(self.item_dic))
elif self.mk_sort == 'type':
self.item_keys = self.type_sort(self.item_dic)
elif self.mk_sort == 'filter' and self.filter:
self.item_keys = self.filter_var_lst(self.item_dic, self.filter)
if not self.item_keys:
self.prompt_msg_setup('{} not found'.format(self.filter))
self.item_keys = sorted(list(self.item_dic))
self.filter = None
self.mk_sort = 'name'
else:
self.limit_msg = ' Filter : {} ({} obj.) '.format(self.filter, len(self.item_keys))
else:
self.item_keys = list(self.item_dic)
# Update number of columns
self.row_num = len(self.item_keys) - 1
def search_item(self, txt_msg):
""" Search an object in the variable list """
self.search = self.prompt.simple(txt_msg)
self.search_lst = [i for i, s in enumerate(self.item_keys) if self.search in s]
self.search_index = 0
self.logger.info('Searching for : {} in :\n{}'.format(self.search, self.item_keys))
if self.search_lst and self.search:
if len(self.search_lst) == 1:
self.prompt_msg_setup("{} occurence of '{}' found".format(len(self.search_lst), self.search))
else:
self.prompt_msg_setup("{} occurences of '{}' found".format(len(self.search_lst), self.search))
self.position = self.search_lst[self.search_index]
self.page = ceil((self.position+1)/self.app.row_max)
elif not self.search:
pass
else:
self.prompt_msg_setup(self.search + ' not found !')
self.position = 0
self.page = 1
def search_item_next(self):
""" Next occurence of the searching. """
self.search_lst = [i for i, s in enumerate(self.item_keys) if self.search in s]
if self.search_lst and self.search_index < len(self.search_lst) - 1:
self.search_index += 1
else:
self.search_index = 0
self.position = self.search_lst[self.search_index]
self.page = ceil((self.position+1)/self.app.row_max)
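# Worked example of the position -> page mapping used above (assuming
# row_max = 10): an item at index 23 lands on page ceil(24 / 10) = 3, i.e. the
# page showing indices 20..29.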
def navigate_lst(self):
""" Navigation though the item list"""
self.pages = ceil((self.row_num + 1)/self.app.row_max)
if self.pkey in self.app.kdown:
self.navigate_down()
if self.pkey in self.app.kup:
self.navigate_up()
if self.pkey in self.app.kleft and self.page > 1:
self.navigate_left()
if self.pkey in self.app.kright and self.page < self.pages:
self.navigate_right()
def navigate_right(self):
""" Navigate Right. """
self.page = self.page + 1
self.position = self.app.row_max*(self.page-1)
def navigate_left(self):
""" Navigate Left. """
self.page = self.page - 1
self.position = self.app.row_max*(self.page-1)
def navigate_up(self):
""" Navigate Up. """
if self.page == 1:
if self.position > 0:
self.position = self.position - 1
else:
if self.position > self.app.row_max*(self.page - 1):
self.position = self.position - 1
else:
self.page = self.page - 1
self.position = self.app.row_max - 1 + self.app.row_max*(self.page - 1)
def navigate_down(self):
""" Navigate Down. """
# First page
if self.page == 1:
if (self.position < self.app.row_max - 1) and (self.position < self.row_num):
self.position = self.position + 1
else:
if self.pages > 1:
self.page = self.page + 1
self.position = self.app.row_max*(self.page - 1)
# Last page
elif self.page == self.pages:
if self.position < self.row_num:
self.position = self.position + 1
# Between
else:
if self.position < self.app.row_max - 1 + self.app.row_max*(self.page - 1):
self.position = self.position + 1
else:
self.page = self.page + 1
self.position = self.app.row_max*(self.page - 1)
def init_menu(self):
""" Init the menu """
self.selected = self.item_keys[self.position]
# Add specific initialization
self.menu_special_init()
# Create menu list
self.menu_lst = self.create_menu()
# Various variables
self.menu_cursor = 0
self.menu_title = ' ' + self.selected.split('/')[-1] + ' '
# Menu dimensions
self.menu_width = len(max(
[self.menu_lst[i][0] for i in range(len(self.menu_lst))], key=len))
self.menu_width = max(self.menu_width, len(self.menu_title)) + 4
self.menu_height = len(self.menu_lst) + 2
self.title_pos = int((self.menu_width - len(self.menu_title) - 2)/2)
# Init Menu
self.gwin_menu = self.app.stdscr.subwin(self.menu_height,
self.menu_width, 2,
self.app.screen_width-self.menu_width-2)
self.gwin_menu.border(0)
self.gwin_menu.bkgd(self.color('txt'))
self.gwin_menu.attrset(self.color('bdr')) # Change border color
self.gwin_menu.keypad(1)
# Send menu to a panel
self.gpan_menu = panel.new_panel(self.gwin_menu)
# Hide the panel. This does not delete the object, it just makes it invisible.
self.gpan_menu.hide()
panel.update_panels()
# Submenu
self.display_menu()
def menu_special_init(self):
""" Additionnal initialization for menu """
pass
def create_menu(self):
""" Create the item list for the kernel menu : To overload """
return [('No Option', 'None')]
def display_menu(self):
""" Display the menu """
self.gpan_menu.top()  # Push the panel to the top of the stack.
self.gpan_menu.show() # Display the panel (which might have been hidden)
self.gwin_menu.clear()
menukey = -1
while menukey not in self.app.kquit:
self.gwin_menu.border(0)
# Title
if self.app.config['font']['pw-font'] == 'True':
self.gwin_menu.addstr(0, self.title_pos, '', self.color('pwf'))
self.gwin_menu.addstr(self.menu_title, self.color('ttl'))
self.gwin_menu.addstr('', self.color('pwf'))
else:
self.gwin_menu.addstr(0, self.title_pos,
'|' + self.menu_title + '|', self.color('ttl'))
self.gwin_menu.refresh()
# Create entries
for index, item in enumerate(self.menu_lst):
if index == self.menu_cursor:
mode = self.color('hh')
else:
mode = self.color('txt') | curses.A_DIM
self.gwin_menu.addstr(1+index, 1, item[0], mode)
# Wait for keyboard event
menukey = self.gwin_menu.getch()
if menukey in self.app.kenter:
eval(self.menu_lst[self.menu_cursor][1])
break
elif menukey in self.app.kup:
self.navigate_menu(-1, len(self.menu_lst))
elif menukey in self.app.kdown:
self.navigate_menu(1, len(self.menu_lst))
if menukey == curses.KEY_RESIZE:
self.resize = True
break
self.gwin_menu.clear()
self.gpan_menu.hide()
def navigate_menu(self, n, size):
""" Navigate through the menu """
self.menu_cursor += n
if self.menu_cursor < 0:
self.menu_cursor = 0
elif self.menu_cursor >= size:
self.menu_cursor = size - 1
|
gpl-3.0
| -6,112,869,070,022,161,000
| 30.439377
| 115
| 0.539096
| false
| 3.768533
| false
| false
| false
|
pdl30/pynoncode
|
pynoncode/fasta_parsers.py
|
1
|
3648
|
#!/usr/bin/python
########################################################################
# 28 Apr 2014
# Patrick Lombard, Centre for Stem Cell Research
# Core Bioinformatics Group
# University of Cambridge
# All rights reserved.
########################################################################
import argparse
import subprocess
import sys, re, os
from collections import defaultdict
from itertools import izip
def parse_paired_fastq(fq1, fq2, outdir):
dict2 = defaultdict(int)
count_dict = defaultdict(int)
f1=open(fq1)
f2=open(fq2)
for line1, line2 in izip(f1, f2):
line1 = line1.rstrip()
id1 = line1.split("#")
line2 = line2.rstrip()
id2 = line2.split("#")
try:
id11 = next(f1)
read1 = id11.rstrip()
id22 = next(f2)
read2 = id22.rstrip()
reads = "{}\t{}".format(read1, read2)
dict2[reads] += 1
crap = next(f1)
crap2 = next(f1)
crap = next(f2)
crap2 = next(f2)
except StopIteration:
break
seen = {}
name1 = "original_fasta_1.fa"
name2 = "original_fasta_2.fa"
count = 1
output1 = open(outdir + "/" + name1, "wb")
output2 = open(outdir + "/" + name2, "wb")
for key in dict2.keys():
reads = key.split("\t")
output1.write(">ID:{}\n{}\n".format(count, reads[0])),
output2.write(">ID:{}\n{}\n".format(count, reads[1])),
count_dict[count] = dict2[key]
count += 1
output3 = open(outdir + "/" + "count_dict.txt", "w")
for key in count_dict.keys():
output3.write("{}\t{}\n".format(key, count_dict[key])),
def parse_single_fastq(fq1, outdir):
dict2 = defaultdict(int)
count_dict = defaultdict(int)
f1=open(fq1)
for line1 in f1:
line1 = line1.rstrip()
id1 = line1.split("#")
try:
id11 = next(f1)
read1 = id11.rstrip()
dict2[read1] += 1
crap = next(f1)
crap2 = next(f1)
except StopIteration:
break
seen = {}
name1 = "original_fasta.fa"
count = 1
output1 = open(outdir + "/" + name1, "wb")
for key in dict2.keys():
reads = key.split("\t")
output1.write(">ID:{}\n{}\n".format(count, reads[0])),
count_dict[count] = dict2[key]
count += 1
output3 = open(outdir + "/" + "count_dict.txt", "w")
for key in count_dict.keys():
output3.write("{}\t{}\n".format(key, count_dict[key])),
def read_fasta(fp):
name, seq = None, []
for line in fp:
line = line.rstrip()
if line.startswith(">"):
if name: yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name: yield (name, ''.join(seq))
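# Illustrative usage of read_fasta (the file name is hypothetical):
#   with open('reads.fa') as fp:
#       for name, seq in read_fasta(fp):
#           print name, len(seq)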
def stripper(fasta):
result = {}
with open(fasta) as f:
for name, seq in read_fasta(f):
bases = list(seq)
end1 = bases[-3:]
end1 = ''.join(end1)
if end1 == "CCA":
tmpseq = bases[:-3]
seq = ''.join(tmpseq)
end2 = bases[-4:]
end2 = ''.join(end2)
if end2 == "CCAC":
tmpseq = bases[:-4]
seq = ''.join(tmpseq)
end3 = bases[-5:]
end3 = ''.join(end3)
if end3 == "CCACC":
tmpseq = bases[:-5]
seq = ''.join(tmpseq)
end4 = bases[-6:]
end4 = ''.join(end4)
if end4 == "CCACCA":
tmpseq = bases[:-6]
seq = ''.join(tmpseq)
result[name] = seq
return result
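# Illustrative behaviour of stripper: since later suffix checks overwrite the
# earlier ones, the longest matching tail among CCA / CCAC / CCACC / CCACCA is
# removed, e.g.
#   '...ACGTCCACCA' -> '...ACGT' (6 bases stripped)
#   '...ACGTCCA' -> '...ACGT' (3 bases stripped)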
def strip_ends(paired):
if paired == True:
output1 = open("clipped_1.fa", "w")
output2 = open("clipped_2.fa", "w")
data1 = stripper("unclipped_multi_unmapped_1.fa")
data2 = stripper("unclipped_multi_unmapped_2.fa")
for key in sorted(data1.keys()):
output1.write("{}\n{}\n".format(key, data1[key])),
for key in sorted(data2.keys()):
output2.write("{}\n{}\n".format(key, data2[key])),
else:
data1 = stripper("unclipped_multi_unmapped.fa")
output1 = open("clipped_fasta.fa", "w")
for key in sorted(data1.keys()):
output1.write("{}\n{}\n".format(key, data1[key])),
|
gpl-2.0
| 8,414,638,851,961,033,000
| 25.064286
| 72
| 0.584704
| false
| 2.628242
| false
| false
| false
|
eklinkhammer/gym-ctf
|
gym_ctf/state/flag.py
|
1
|
3158
|
import numpy as np
import random
import math
class Flag():
""" A flag is target that agents use to score in capture the flag.
Once captured, it is marked as taken and stores the scoring team.
"""
def __init__(self, pos, scoring_radius):
assert scoring_radius >= 0
self.position = pos
self.scoring_radius = scoring_radius
self.taken = False
self.scoring_team = None
self.scoring_count = 0
def take(self, team_id):
self.taken = True
self.scoring_team = team_id
def reset(self):
self.taken = False
self.scoring_team = None
self.scoring_count = 0
@staticmethod
def random_pos(min_x, min_y, max_x, max_y):
""" Generates a random tuple representing a 2D point within the box
defined by the ranges.
Args:
min_x (double): Minimum x value (lower-left corner)
min_y (double): Minimum y value (lower-left corner)
max_x (double): Maximum x value (upper-right corner)
max_y (double): Maximum y value (upper-right corner)
Returns:
(double, double). 2D point.
"""
if max_y is None: max_y = max_x
rand_x = random.randrange(min_x, max_x,1)
rand_y = random.randrange(min_y, max_y,1)
position = (rand_x, rand_y)
return position
@staticmethod
def random_flag(min_x, min_y, max_x, max_y, scoring_radius):
""" Generates a random flag at a position within the bounding box
provided using the given scoring radius. Scoring radius is not
random because it depends (for best results) on container size.
Args:
min_x (double): Minimum x value (lower-left corner)
min_y (double): Minimum y value (lower-left corner)
max_x (double): Maximum x value (upper-right corner)
max_y (double): Maximum y value (upper-right corner)
scoring_radius (double): The radius within which agents can capture the flag
Returns:
Flag. A flag object at a random 2D point.
"""
return Flag(Flag.random_pos(min_x, min_y, max_x, max_y), scoring_radius)
def obs(self):
""" Returns the observation of a flag in format expected by gym env
Returns:
numpy array of length 3. Contains position of flag and scoring
team. Team is 0 if no team scores
"""
if self.taken:
team = self.scoring_team
else:
team = 0
return np.array([self.position[0], self.position[1], team])
def within_scoring_distance(self, position_other):
""" Determine if other position is within the scoring radius of the flag
Args:
position_other (2-tuple of doubles): 2D point
Returns:
boolean. True iff position_other is within scoring radius.
"""
distance = math.sqrt(math.pow(self.position[0] - position_other[0], 2) +
math.pow(self.position[1] - position_other[1], 2))
return distance <= self.scoring_radius
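# Illustrative usage (values are arbitrary):
#   f = Flag((10, 20), scoring_radius=5)
#   f.obs() # -> array([10, 20, 0])
#   f.within_scoring_distance((13, 24)) # -> True (distance 5 <= radius 5)
#   f.take(team_id=2)
#   f.obs() # -> array([10, 20, 2])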
|
mit
| 6,347,739,872,781,049,000
| 33.703297
| 80
| 0.578214
| false
| 4.160738
| false
| false
| false
|
gajim/python-nbxmpp
|
nbxmpp/modules/register/util.py
|
1
|
4233
|
# Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.protocol import Iq
from nbxmpp.const import REGISTER_FIELDS
from nbxmpp.structs import RegisterData
from nbxmpp.errors import StanzaError
from nbxmpp.errors import MalformedStanzaError
from nbxmpp.modules.dataforms import create_field
from nbxmpp.modules.dataforms import extend_form
from nbxmpp.modules.dataforms import SimpleDataForm
from nbxmpp.modules.bits_of_binary import parse_bob_data
def _make_password_change_request(domain, username, password):
iq = Iq('set', Namespace.REGISTER, to=domain)
query = iq.getQuery()
query.setTagData('username', username)
query.setTagData('password', password)
return iq
def _make_password_change_with_form(domain, form):
iq = Iq('set', Namespace.REGISTER, to=domain)
iq.setQueryPayload(form)
return iq
def _make_register_form(jid, form):
iq = Iq('set', Namespace.REGISTER, to=jid)
if form.is_fake_form():
query = iq.getTag('query')
for field in form.iter_fields():
if field.var == 'fakeform':
continue
query.addChild(field.var, payload=[field.value])
return iq
iq.setQueryPayload(form)
return iq
def _make_unregister_request(jid):
iq = Iq('set', to=jid)
query = iq.setQuery()
query.setNamespace(Namespace.REGISTER)
query.addChild('remove')
return iq
def _parse_oob_url(query):
oob = query.getTag('x', namespace=Namespace.X_OOB)
if oob is not None:
return oob.getTagData('url') or None
return None
def _parse_form(stanza):
query = stanza.getTag('query', namespace=Namespace.REGISTER)
form = query.getTag('x', namespace=Namespace.DATA)
if form is None:
return None
form = extend_form(node=form)
field = form.vars.get('FORM_TYPE')
if field is None:
return None
# Invalid urn:xmpp:captcha used by ejabberd
# See https://github.com/processone/ejabberd/issues/3045
if field.value in ('jabber:iq:register', 'urn:xmpp:captcha'):
return form
return None
def _parse_fields_form(query):
fields = []
for field in query.getChildren():
field_name = field.getName()
if field_name not in REGISTER_FIELDS:
continue
required = field_name in ('username', 'password')
typ = 'text-single' if field_name != 'password' else 'text-private'
fields.append(create_field(typ=typ,
var=field_name,
required=required))
if not fields:
return None
fields.append(create_field(typ='hidden', var='fakeform'))
return SimpleDataForm(type_='form',
instructions=query.getTagData('instructions'),
fields=fields)
def _parse_register_data(response):
query = response.getTag('query', namespace=Namespace.REGISTER)
if query is None:
raise StanzaError(response)
instructions = query.getTagData('instructions') or None
data = RegisterData(instructions=instructions,
form=_parse_form(response),
fields_form=_parse_fields_form(query),
oob_url=_parse_oob_url(query),
bob_data=parse_bob_data(query))
if (data.form is None and
data.fields_form is None and
data.oob_url is None):
raise MalformedStanzaError('invalid register response', response)
return data
|
gpl-3.0
| 3,761,805,763,904,235,000
| 31.305344
| 75
| 0.661153
| false
| 3.861314
| false
| false
| false
|
thomasmoelhave/TerraNNI
|
data-generator.py
|
1
|
2762
|
#!/usr/bin/env python
# encoding: utf-8
import sys
import os
import math
import random
from optparse import OptionParser
# Simple function to write out the rows of a (each an (x, y, z, t) tuple) to file fn
def writeArray(fn, a):
if fn:
f = open(fn, 'w')
for i in range(0,len(a)):
f.write("%f %f %f %d\n" % (a[i][0],a[i][1],a[i][2],a[i][3]))
f.close()
else:
for i in range(0,len(a)):
print "%f %f %f %d\n" % (a[i][0],a[i][1],a[i][2],a[i][3])
def linearTF(t,m = 1):
return t * m
def symetricSmooth(tf,to):
xo = yo = 0.5
def retFunc(x,y,t):
return to + tf(t) * math.cos( math.pow(x-xo,2) + math.pow(y-yo,2) )
return retFunc
def wall(xmin, xmax, tmin, tmax):
h = 20
b = 1
width = 5
def retFunc(x,y,t):
tp = 1 - (1.0 * (tmax - t - tmin) / (tmax - tmin))
xm = tp * (xmax - xmin) + xmin
if ( x >= xm and x < xm + width ):
return h
return b
return retFunc
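# Illustrative behaviour of wall() (with xmin=-100, xmax=100, tmin=0, tmax=100):
# at t=0 the wall's left edge sits at x=-100 and it sweeps right, reaching
# x=100 at t=100; points with xm <= x < xm + 5 get height 20, everything else
# gets the base height 1.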
def randData():
def retFunc(x,y,t):
return myrand(1,100,0.01)
return retFunc
def myrand(mn, mx, g):
m = 1
while (g * m) != int(g*m):
m = m*10
return (1.0 * random.randrange(m*mn, m*mx, m*g)) / m
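# Illustrative behaviour of myrand: the granularity g is scaled up to an
# integer step, so e.g. myrand(1, 100, 0.01) draws uniformly from
# {1.00, 1.01, ..., 99.99}.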
def fill(f,n,p,ta,fn):
if fn:
fl = open(fn, 'w')
#r = []
for j in range(0,len(ta)):
t1 = ta[j]
print 'time: ', t1, n
for i in range(0,n):
x1 = myrand(p['xmin'],p['xmax'],p['gran'])
y1 = myrand(p['ymin'],p['ymax'],p['gran'])
if fn:
fl.write("%f %f %f %d\n" % (x1,y1,f(x1,y1,t1),t1))
else:
print "%f %f %f %d\n" % (x1,y1,f(x1,y1,t1),t1)
#r.append( (x1,y1,f(x1,y1,t1),t1) )
if fn:
fl.close()
#return r
def main():
parser = OptionParser()
parser.add_option("-f", "--file", action="store", type="string", dest="filename")
parser.add_option("-n", type="int", dest="n", default=1000)
(options, args) = parser.parse_args()
params = {'xmin': -100, 'ymin': -100, 'xmax': 100, 'ymax': 100, 'gran': 0.001 }
print "n: ", options.n
# Only use ONE of these functions for any given run
#writeArray( options.filename, fill( symetricSmooth(linearTF,100.0), options.n, params, (0,10) ) )
#writeArray( options.filename, fill( wall(params['xmin'], params['xmax'],0,100), options.n, params, range(0,110,10) )
#writeArray( options.filename, fill( randData, 200000000, params, range(0,21,1) ))
#fill( symetricSmooth(linearTF,100.0), options.n, params, (0,10), options.filename )
ta= range(0,110,1)
ta.remove(10)
ta.remove(30)
ta.remove(50)
ta.remove(70)
ta.remove(90)
#fill( wall(params['xmin'], params['xmax'],0,100), options.n, params, ta, options.filename )
#fill( randData(), 200000000, params, range(0,21,1), options.filename)
#fill( randData(), 50000000, params, range(0,11,1), options.filename) # BIG DATA
fill( randData(), 10000000, params, range(0,11,1), options.filename)
if __name__ == '__main__':
main()
|
gpl-3.0
| -3,000,724,322,534,402,000
| 25.815534
| 118
| 0.60391
| false
| 2.317114
| false
| false
| false
|
lym/allura-git
|
Allura/allura/lib/utils.py
|
1
|
20481
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
import time
import string
import hashlib
import binascii
import logging.handlers
import codecs
from ming.odm import session
import os.path
import datetime
import random
import mimetypes
import re
import magic
from itertools import groupby
import collections
import tg
import pylons
import json
import webob.multidict
from formencode import Invalid
from tg.decorators import before_validate
from pylons import response
from pylons import tmpl_context as c
from pylons.controllers.util import etag_cache
from paste.deploy.converters import asbool, asint
from paste.httpheaders import CACHE_CONTROL, EXPIRES
from webhelpers.html import literal
from webob import exc
from pygments.formatters import HtmlFormatter
from setproctitle import getproctitle
import html5lib.sanitizer
from ew import jinja2_ew as ew
from ming.utils import LazyProperty
from ming.odm.odmsession import ODMCursor
MARKDOWN_EXTENSIONS = ['.markdown', '.mdown', '.mkdn', '.mkd', '.md']
def permanent_redirect(url):
try:
tg.redirect(url)
except exc.HTTPFound, err:
raise exc.HTTPMovedPermanently(location=err.location)
def guess_mime_type(filename):
'''Guess MIME type based on filename.
Applies heuristics, tweaks, and defaults in centralized manner.
'''
# Consider changing to strict=False
content_type = mimetypes.guess_type(filename, strict=True)
if content_type[0]:
content_type = content_type[0]
else:
content_type = 'application/octet-stream'
return content_type
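# Illustrative behaviour (exact results for uncommon extensions depend on the
# platform's mimetypes registry):
#   guess_mime_type('photo.jpg') -> 'image/jpeg'
#   guess_mime_type('data.some-unknown-ext') -> 'application/octet-stream'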
class ConfigProxy(object):
'''Wrapper for loading config values at module-scope so we don't
have problems when a module is imported before tg.config is initialized
'''
def __init__(self, **kw):
self._kw = kw
def __getattr__(self, k):
return self.get(k)
def get(self, key, default=None):
return tg.config.get(self._kw.get(key, key), default)
def get_bool(self, key):
return asbool(self.get(key))
class lazy_logger(object):
'''Lazy instantiation of a logger, to ensure that it does not get
created before logging is configured (which would make it disabled)'''
def __init__(self, name):
self._name = name
@LazyProperty
def _logger(self):
return logging.getLogger(self._name)
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError, name
return getattr(self._logger, name)
class TimedRotatingHandler(logging.handlers.BaseRotatingHandler):
def __init__(self, strftime_pattern):
self.pattern = strftime_pattern
self.last_filename = self.current_filename()
logging.handlers.BaseRotatingHandler.__init__(
self, self.last_filename, 'a')
def current_filename(self):
return os.path.abspath(datetime.datetime.utcnow().strftime(self.pattern))
def shouldRollover(self, record):
'Inherited from BaseRotatingHandler'
return self.current_filename() != self.last_filename
def doRollover(self):
self.stream.close()
self.baseFilename = self.current_filename()
if self.encoding:
self.stream = codecs.open(self.baseFilename, 'w', self.encoding)
else:
self.stream = open(self.baseFilename, 'w')
class StatsHandler(TimedRotatingHandler):
fields = (
'action', 'action_type', 'tool_type', 'tool_mount', 'project', 'neighborhood',
'username', 'url', 'ip_address')
def __init__(self,
strftime_pattern,
module='allura',
page=1,
**kwargs):
self.page = page
self.module = module
TimedRotatingHandler.__init__(self, strftime_pattern)
def emit(self, record):
if not hasattr(record, 'action'):
return
kwpairs = dict(
module=self.module,
page=self.page)
for name in self.fields:
kwpairs[name] = getattr(record, name, None)
kwpairs.update(getattr(record, 'kwpairs', {}))
record.kwpairs = ','.join(
'%s=%s' % (k, v) for k, v in sorted(kwpairs.iteritems())
if v is not None)
record.exc_info = None # Never put tracebacks in the rtstats log
TimedRotatingHandler.emit(self, record)
class CustomWatchedFileHandler(logging.handlers.WatchedFileHandler):
"""Custom log handler for Allura"""
def format(self, record):
"""Prepends current process name to ``record.name`` if running in the
context of a taskd process that is currently processing a task.
"""
title = getproctitle()
if title.startswith('taskd:'):
record.name = "{0}:{1}".format(title, record.name)
return super(CustomWatchedFileHandler, self).format(record)
def chunked_find(cls, query=None, pagesize=1024, sort_key='_id', sort_dir=1):
'''
Execute a mongo query against the specified class, yield some results at
a time (avoids mongo cursor timeouts if the total result set is very large).
Pass an indexed sort_key for efficient queries. Default _id should work
in most cases.
'''
if query is None:
query = {}
page = 0
max_id = None
while True:
if sort_key:
if max_id:
if sort_key not in query:
query[sort_key] = {}
query[sort_key]['$gt'] = max_id
q = cls.query.find(query).limit(pagesize).sort(sort_key, sort_dir)
else:
# skipping requires scanning, even for an indexed query
q = cls.query.find(query).limit(pagesize).skip(pagesize * page)
results = (q.all())
if not results:
break
if sort_key:
max_id = results[-1][sort_key]
yield results
page += 1
def lsub_utf8(s, n):
'''Useful for returning n bytes of a UTF-8 string, rather than characters'''
while len(s) > n:
k = n
while (ord(s[k]) & 0xc0) == 0x80:
k -= 1
return s[:k]
return s
def chunked_list(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i + n]
def chunked_iter(iterable, max_size):
'''return iterable 'chunks' from the iterable of max size max_size'''
eiter = enumerate(iterable)
keyfunc = lambda (i, x): i // max_size
for _, chunk in groupby(eiter, keyfunc):
yield (x for i, x in chunk)
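# Illustrative behaviour of the two chunking helpers above:
#   list(chunked_list(range(5), 2)) -> [[0, 1], [2, 3], [4]]
#   [list(c) for c in chunked_iter(range(5), 2)] -> [[0, 1], [2, 3], [4]]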
class AntiSpam(object):
'''Helper class for bot-protecting forms'''
honey_field_template = string.Template('''<p class="$honey_class">
<label for="$fld_id">You seem to have CSS turned off.
Please don't fill out this field.</label><br>
<input id="$fld_id" name="$fld_name" type="text"><br></p>''')
def __init__(self, request=None, num_honey=2):
self.num_honey = num_honey
if request is None or request.method == 'GET':
self.request = pylons.request
self.timestamp = int(time.time())
self.spinner = self.make_spinner()
self.timestamp_text = str(self.timestamp)
self.spinner_text = self._wrap(self.spinner)
else:
self.request = request
self.timestamp_text = request.params['timestamp']
self.spinner_text = request.params['spinner']
self.timestamp = int(self.timestamp_text)
self.spinner = self._unwrap(self.spinner_text)
self.spinner_ord = map(ord, self.spinner)
self.random_padding = [random.randint(0, 255) for x in self.spinner]
self.honey_class = self.enc(self.spinner_text, css_safe=True)
# The counter is to ensure that multiple forms in the same page
# don't end up with the same id. Instead of doing:
#
# honey0, honey1
# which just relies on 0..num_honey we include a counter
# which is incremented every time extra_fields is called:
#
# honey00, honey 01, honey10, honey11
self.counter = 0
@staticmethod
def _wrap(s):
'''Encode a string to make it HTML id-safe (starts with alpha, includes
only digits, hyphens, underscores, colons, and periods). Luckily, base64
encoding doesn't use hyphens, underscores, colons, nor periods, so we'll
use these characters to replace its plus, slash, equals, and newline.
'''
tx_tbl = string.maketrans('+/', '-_')
s = binascii.b2a_base64(s)
s = s.rstrip('=\n')
s = s.translate(tx_tbl)
s = 'X' + s
return s
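    # Example round-trip: _wrap('ab') == 'XYWI' and _unwrap('XYWI') == 'ab'.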
@staticmethod
def _unwrap(s):
tx_tbl = string.maketrans('-_', '+/')
s = s[1:]
s = str(s).translate(tx_tbl)
i = len(s) % 4
if i > 0:
s += '=' * (4 - i)
s = binascii.a2b_base64(s + '\n')
return s
def enc(self, plain, css_safe=False):
'''Stupid fieldname encryption. Not production-grade, but
hopefully "good enough" to stop spammers. Basically just an
XOR of the spinner with the unobfuscated field name
'''
# Plain starts with its length, includes the ordinals for its
# characters, and is padded with random data
plain = ([len(plain)]
+ map(ord, plain)
+ self.random_padding[:len(self.spinner_ord) - len(plain) - 1])
enc = ''.join(chr(p ^ s) for p, s in zip(plain, self.spinner_ord))
enc = self._wrap(enc)
if css_safe:
enc = ''.join(ch for ch in enc if ch.isalpha())
return enc
def dec(self, enc):
enc = self._unwrap(enc)
enc = list(map(ord, enc))
plain = [e ^ s for e, s in zip(enc, self.spinner_ord)]
plain = plain[1:1 + plain[0]]
plain = ''.join(map(chr, plain))
return plain
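    # Note: dec(enc(name)) round-trips the plain field name as long as css_safe
    # is False and the name is shorter than the 20-byte spinner; the css_safe
    # variant drops non-alphabetic characters and is therefore one-way.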
def extra_fields(self):
yield ew.HiddenField(name='timestamp', value=self.timestamp_text).display()
yield ew.HiddenField(name='spinner', value=self.spinner_text).display()
for fldno in range(self.num_honey):
fld_name = self.enc('honey%d' % (fldno))
fld_id = self.enc('honey%d%d' % (self.counter, fldno))
yield literal(self.honey_field_template.substitute(
honey_class=self.honey_class,
fld_id=fld_id,
fld_name=fld_name))
self.counter += 1
def make_spinner(self, timestamp=None):
if timestamp is None:
timestamp = self.timestamp
try:
client_ip = ip_address(self.request)
except (TypeError, AttributeError), err:
client_ip = '127.0.0.1'
plain = '%d:%s:%s' % (
timestamp, client_ip, pylons.config.get('spinner_secret', 'abcdef'))
return hashlib.sha1(plain).digest()
@classmethod
def validate_request(cls, request=None, now=None, params=None):
if request is None:
request = pylons.request
if params is None:
params = request.params
new_params = dict(params)
if not request.method == 'GET':
new_params.pop('timestamp', None)
new_params.pop('spinner', None)
obj = cls(request)
if now is None:
now = time.time()
if obj.timestamp > now + 5:
raise ValueError, 'Post from the future'
if now - obj.timestamp > 24 * 60 * 60:
raise ValueError, 'Post from the distant past'
if obj.spinner != obj.make_spinner(obj.timestamp):
raise ValueError, 'Bad spinner value'
for k in new_params.keys():
new_params[obj.dec(k)] = new_params.pop(k)
for fldno in range(obj.num_honey):
value = new_params.pop('honey%s' % fldno)
if value:
raise ValueError, 'Value in honeypot field: %s' % value
return new_params
@classmethod
def validate(cls, error_msg):
'''Controller decorator to raise Invalid errors if bot protection is engaged'''
def antispam_hook(remainder, params):
'''Converts various errors in validate_request to a single Invalid message'''
try:
new_params = cls.validate_request(params=params)
params.update(new_params)
except (ValueError, TypeError, binascii.Error):
raise Invalid(error_msg, params, None)
return before_validate(antispam_hook)
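# Illustrative controller usage sketch (the method name and error message are
# made up; validate() wraps the handler via the before_validate hook):
#
#     @AntiSpam.validate('Spambot protection engaged')
#     def save_new_topic(self, **kw):
#         ...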
class TruthyCallable(object):
'''
Wraps a callable to make it truthy in a boolean context.
Assumes the callable returns a truthy value and can be called with no args.
'''
def __init__(self, callable):
self.callable = callable
def __call__(self, *args, **kw):
return self.callable(*args, **kw)
def __nonzero__(self):
return self.callable()
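# Example: perm_check = TruthyCallable(lambda: True)
#          bool(perm_check) == True and perm_check() == True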
class TransformedDict(collections.MutableMapping):
"""
A dictionary which applies an arbitrary
key-altering function before accessing the keys.
From: http://stackoverflow.com/questions/3387691/python-how-to-perfectly-override-a-dict
"""
def __init__(self, *args, **kwargs):
self.store = dict()
self.update(dict(*args, **kwargs)) # use the free update to set keys
def __getitem__(self, key):
return self.store[self.__keytransform__(key)]
def __setitem__(self, key, value):
self.store[self.__keytransform__(key)] = value
def __delitem__(self, key):
del self.store[self.__keytransform__(key)]
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __keytransform__(self, key):
return key
class CaseInsensitiveDict(TransformedDict):
def __keytransform__(self, key):
return key.lower()
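# Example: d = CaseInsensitiveDict(); d['Foo'] = 1; then d['FOO'] == 1, because
# every key is lower-cased before it touches the backing store.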
def postmortem_hook(etype, value, tb): # pragma no cover
import sys
import pdb
import traceback
try:
from IPython.ipapi import make_session
make_session()
from IPython.Debugger import Pdb
sys.stderr.write('Entering post-mortem IPDB shell\n')
p = Pdb(color_scheme='Linux')
p.reset()
p.setup(None, tb)
p.print_stack_trace()
sys.stderr.write('%s: %s\n' % (etype, value))
p.cmdloop()
p.forget()
# p.interaction(None, tb)
except ImportError:
sys.stderr.write('Entering post-mortem PDB shell\n')
traceback.print_exception(etype, value, tb)
pdb.post_mortem(tb)
class LineAnchorCodeHtmlFormatter(HtmlFormatter):
def _wrap_pre(self, inner):
style = []
if self.prestyles:
style.append(self.prestyles)
if self.noclasses:
style.append('line-height: 125%')
style = '; '.join(style)
num = self.linenostart
yield 0, ('<pre' + (style and ' style="%s"' % style) + '>')
for tup in inner:
yield (tup[0], '<div id="l%s" class="code_block">%s</div>' % (num, tup[1]))
num += 1
yield 0, '</pre>'
def generate_code_stats(blob):
stats = {'line_count': 0,
'code_size': 0,
'data_line_count': 0}
code = blob.text
lines = code.split('\n')
stats['code_size'] = blob.size
stats['line_count'] = len(lines)
spaces = re.compile(r'^\s*$')
stats['data_line_count'] = sum([1 for l in lines if not spaces.match(l)])
return stats
def is_text_file(file):
msg = magic.from_buffer(file[:1024])
if ("text" in msg) or ("empty" in msg):
return True
return False
def take_while_true(source):
x = source()
while x:
yield x
x = source()
def serve_file(fp, filename, content_type, last_modified=None,
cache_expires=None, size=None, embed=True, etag=None):
'''Sets the response headers and serves as a wsgi iter'''
if not etag and filename and last_modified:
etag = u'{0}?{1}'.format(filename, last_modified).encode('utf-8')
if etag:
etag_cache(etag)
pylons.response.headers['Content-Type'] = ''
pylons.response.content_type = content_type.encode('utf-8')
pylons.response.cache_expires = cache_expires or asint(
tg.config.get('files_expires_header_secs', 60 * 60))
pylons.response.last_modified = last_modified
if size:
pylons.response.content_length = size
if 'Pragma' in pylons.response.headers:
del pylons.response.headers['Pragma']
if 'Cache-Control' in pylons.response.headers:
del pylons.response.headers['Cache-Control']
if not embed:
pylons.response.headers.add(
'Content-Disposition',
'attachment;filename="%s"' % filename.encode('utf-8'))
# http://code.google.com/p/modwsgi/wiki/FileWrapperExtension
block_size = 4096
if 'wsgi.file_wrapper' in tg.request.environ:
return tg.request.environ['wsgi.file_wrapper'](fp, block_size)
else:
return iter(lambda: fp.read(block_size), '')
class ForgeHTMLSanitizer(html5lib.sanitizer.HTMLSanitizer):
valid_iframe_srcs = ('https://www.youtube.com/embed/', 'https://www.gittip.com/')
def sanitize_token(self, token):
if 'iframe' in self.allowed_elements:
self.allowed_elements.remove('iframe')
if token.get('name') == 'iframe':
attrs = dict(token.get('data'))
if attrs.get('src', '').startswith(self.valid_iframe_srcs):
self.allowed_elements.append('iframe')
return super(ForgeHTMLSanitizer, self).sanitize_token(token)
def ip_address(request):
ip = request.remote_addr
if tg.config.get('ip_address_header'):
ip = request.headers.get(tg.config['ip_address_header']) or ip
return ip
class EmptyCursor(ODMCursor):
"""Ming cursor with no results"""
def __init__(self, *args, **kw):
pass
@property
def extensions(self):
return []
def count(self):
return 0
def _next_impl(self):
raise StopIteration
def next(self):
raise StopIteration
def options(self, **kw):
return self
def limit(self, limit):
return self
def skip(self, skip):
return self
def hint(self, index_or_name):
return self
def sort(self, *args, **kw):
return self
class DateJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
return json.JSONEncoder.default(self, obj)
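# Example: json.dumps({'when': datetime.datetime(2014, 1, 2, 3, 4, 5)},
#                     cls=DateJSONEncoder) == '{"when": "2014-01-02T03:04:05Z"}'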
def clean_phone_number(number):
pattern = re.compile('\W+')
number = pattern.sub('', number)
return number
def phone_number_hash(number):
number = clean_phone_number(number)
return hashlib.sha1(number).hexdigest()
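# Example: clean_phone_number('+1 (555) 123-4567') == '15551234567'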
@contextmanager
def skip_mod_date(model_cls):
""" Avoids updating 'mod_date'
Useful for saving cache on a model and things like that.
.. note:: This only works when the changes made to the model are flushed.
:Example:
from allura import model as M
key = self.can_merge_cache_key()
with utils.skip_mod_date(M.MergeRequest):
self.can_merge_cache[key] = val
session(self).flush(self)
:param model_cls: The model *class* being updated.
"""
skip_mod_date = getattr(session(model_cls)._get(), 'skip_mod_date', False)
session(model_cls)._get().skip_mod_date = True
try:
yield
finally:
session(model_cls)._get().skip_mod_date = skip_mod_date
|
apache-2.0
| 4,468,536,068,037,648,400
| 30.953198
| 92
| 0.609541
| false
| 3.780177
| false
| false
| false
|
jmoss2020/moss-advprog
|
Shapes.py
|
1
|
3209
|
class Circle():
def __init__(self, r):
self.r = r
def area(self):
return (self.r ** 2) * 3.14
def perimeter(self):
return self.r * 6.28
def __str__(self):
return "Circle has a radius of %.2f, an area of %.2f, and a perimeter of %.2f." % (self.r, self.area, self.perimeter)
class Rectangle():
def __init__(self, x, y):
self.x = x
self.y = y
def area(self):
return self.x * self.y
def perimeter(self):
return 2 * self.x + 2 * self.y
def __str__(self):
return "Rectangle has a hight of %.2f, a width of %.2f, an area of %.2f, and a perimeter of %.2f." % (self.y, self.x, self.area(), self.perimeter())
class Square(Rectangle):
def __init__(self,x):
self.x = x
self.y = x
def __str__(self):
return "Square has a side length of %.2f, an area of %.2f, and a perimeter of %.2f." % (self.y, self.area(), self.perimeter())
class RightTriangle():
def __init__(self, x, y):
self.x = x
self.y = y
self.hyp = self.hypotenuse()
def area(self):
return 0.5 * self.x * self.y
def hypotenuse(self):
return (self.x ** 2 + self.y ** 2) ** 0.5
def perimeter(self):
return self.hyp + self.x + self.y
def __str__(self):
return "Triangle has a hight of %.2f, a base of %.2f an area of %.2f, and a perimeter of %.2f." % (self.y, self.x, self.area(), self.perimeter())
class EquilateralRightTriangle(RightTriangle):
def __init__(self, x):
self.x = x
self.y = x
self.hyp = self.hypotenuse()
def __str__(self):
return "Triangle has a base and hight of %.2f an area of %.2f, and a perimeter of %.2f." % (self.y, self.area(), self.perimeter())
class Prism():
def surfacearea(self):
return 2 * self.area() + self.z * self.perimeter()
def volume(self):
return self.area() * self.z
class Cube(Square,Prism):
def __init__(self, x):
self.x = x
self.y = x
self.z = x
def __str__(self):
return "Cube has a width, hieght, and depth of %.2f, a surfacearea of %.2f, and a volume of %.2f." % (self.x, self.surfacearea(), self.volume())
class TriangularPrism(RightTriangle,Prism):
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
self.hyp = self.hypotenuse()
def __str__(self):
return "Triangular Prism has a width of %.2f, a hieght of %.2f, a depth of %.2f, a surfacearea of %.2f, and a volume of %.2f." % (self.x, self.y, self.z, self.surfacearea(), self.volume())
class Cylinder(Circle,Prism):
def __init__(self, r, z):
self.r = r
self.z = z
def __str__(self):
return "Cylinder has a radius of %.2f, a hieght of %.2f, a surfacearea of %.2f, and a volume of %.2f." % (self.r, self.z, self.surfacearea(), self.volume())
# circle1=Circle(5)
# print circle1
# rectangle1=Rectangle(3,4)
# print rectangle1
# square1=Square(6)
# print square1
# RT=RightTriangle(3,4)
# print RT
# ERT=EquilateralRightTriangle(2)
# print ERT
# Cube1=Cube(4)
# print Cube1
# TP=TriangularPrism(3,4,5)
# print TP
Cyl=Cylinder(1,2)
print Cyl
|
gpl-3.0
| 700,869,985,878,508,000
| 27.651786
| 196
| 0.569959
| false
| 2.976809
| false
| false
| false
|
jonathanhowells/sentimentCSV
|
sentimentCSV.py
|
1
|
4119
|
import os
import subprocess
import sys
import timeit
import csv
import pandas as pd
import re
import string
import numpy as np
import shutil
directory = os.getcwd()
os.chdir(directory)
stanford_directory = raw_input("Enter path to Stanford CoreNLP: ")
input_filename = raw_input("Enter input csv filename: ")
output_filename = raw_input("Enter output csv filename: ")
text_column = raw_input("Enter text column name: ")
print "Reading file..."
data = pd.read_csv(input_filename, error_bad_lines=False)
print "Cleaning comments..."
comments = data[text_column]
comments_clean = []
for comment in comments:
comment = re.sub(r'\n', r'',str(comment))
comment = re.sub(r'MR', r'',str(comment))
comment = re.sub(r'mr', r'',str(comment))
comment = re.sub(r'Mr', r'',str(comment))
comment = ' '.join(re.split(r'(?<=[.:;])\s', comment)[:1])
comment = comment.translate(string.maketrans("",""), string.punctuation)
comments_clean.append(comment)
comment_chunks=[comments_clean[x:x+2000] for x in xrange(0, len(comments_clean), 2000)]
input_directory = stanford_directory + '/input_data'
if not os.path.exists(input_directory):
os.makedirs(input_directory)
os.chdir(input_directory)
N = len(comment_chunks)
for n in range(N):
f = open("comments" + str(n) + ".txt", "w");
comments = comment_chunks[n]
for i in range(len(comments)):
if i == len(comments)-1:
f.write(str(comments[i]))
f.write(".")
else:
f.write(str(comments[i]))
f.write(". \n")
f.close()
os.chdir(stanford_directory)
sentiments = [' Neutral', ' Negative', ' Positive', ' Very positive', ' Very negative']
def chunks(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
def update_progress(progress):
barLength = 100 # Modify this to change the length of the progress bar
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\rPercent: [{0}] {1}% {2}".format( "-"*block + " "*(barLength-block), round(progress*100,2), status)
sys.stdout.write(text)
sys.stdout.flush()
f = open("output.csv", "wb")
print "Calculating Sentiment..."
start = timeit.default_timer()
for n in range(N):
file_name = os.path.join('input_data', 'comments' + str(n) + '.txt')
p = subprocess.Popen('java -cp "*" -mx5g edu.stanford.nlp.sentiment.SentimentPipeline -file ' + file_name,
shell=True,
stdout=subprocess.PIPE)
output, errors = p.communicate()
senti_list = output.split('\n')
del output, errors
for i in range(len(senti_list)):
if i % 2 == 1 and senti_list[i] not in sentiments:
senti_list.insert(i, ' Neutral')
senti_list = senti_list[:-1]
output_list = list(chunks(senti_list, 2))
progress = float(n)/N
update_progress(progress)
#print "rows:", len(output_list)
writer = csv.writer(f, quoting=csv.QUOTE_ALL)
writer.writerows(output_list)
del senti_list, output_list
f.close()
shutil.rmtree(stanford_directory + '/input_data/')
stop = timeit.default_timer()
print "Time taken:", stop - start
output_frame = pd.read_csv("output.csv", header=None)
output_frame.columns = ['Text', 'Sentiment']
senti_text = np.array(output_frame['Text'])
senti_bool = []
for element in senti_text:
if element == '.':
senti_bool.append(0)
else:
senti_bool.append(1)
output_frame["Text_Bool"] = pd.Series(senti_bool)
del senti_bool
data['Sentiment'] = output_frame['Sentiment']
data['Text_Bool'] = output_frame['Text_Bool']
os.chdir('..')
print "Writing to output file..."
data.to_csv(output_filename)
print "Finished!"
|
gpl-2.0
| 6,229,005,747,004,761,000
| 26.278146
| 112
| 0.62151
| false
| 3.223005
| false
| false
| false
|
MalmoUniversity-DA366A/calvin-base
|
calvin/runtime/north/calvincontrol.py
|
1
|
20945
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import time
import datetime
import json
from calvin.Tools import cscompiler as compiler
from calvin.Tools import deployer
from calvin.utilities.calvinlogger import get_logger
from calvin.utilities.calvin_callback import CalvinCB
from calvin.runtime.south.plugins.async import server_connection
from urlparse import urlparse
_log = get_logger(__name__)
control_api_doc = ""
# control_api_doc += \
"""
GET /log
Streaming log from calvin node (more documentation needed)
"""
re_get_log = re.compile(r"GET /log\sHTTP/1")
control_api_doc += \
"""
GET /id
Get id of this calvin node
Response: node-id
"""
re_get_node_id = re.compile(r"GET /id\sHTTP/1")
control_api_doc += \
"""
GET /nodes
List nodes in network (excluding self)
Response: List of node-ids
"""
re_get_nodes = re.compile(r"GET /nodes\sHTTP/1")
control_api_doc += \
"""
GET /node/{node-id}
Get information on node node-id
Response:
{
"attributes": null,
"control_uri": "http://<address>:<controlport>",
"uri": "calvinip://<address>:<port>"
}
"""
re_get_node = re.compile(r"GET /node/((NODE_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\sHTTP/1")
control_api_doc += \
"""
POST /peer_setup
Add calvin nodes to network
Body: {"peers: ["calvinip://<address>:<port>", ...] }
Response: {"result": "OK"}
"""
re_post_peer_setup = re.compile(r"POST /peer_setup\sHTTP/1")
control_api_doc += \
"""
GET /applications
Get applications launched from this node
Response: List of application ids
"""
re_get_applications = re.compile(r"GET /applications\sHTTP/1")
control_api_doc += \
"""
GET /application/{application-id}
Get information on application application-id
Response:
{
"origin_node_id": <node id>,
"actors": <list of actor ids>
"name": <name or id of this application>
}
"""
re_get_application = re.compile(r"GET /application/((APP_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\sHTTP/1")
control_api_doc += \
"""
DELETE /application/{application-id}
Stop application (only applications launched from this node)
Response: {"result: "OK"}
"""
re_del_application = re.compile(r"DELETE /application/((APP_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\sHTTP/1")
control_api_doc += \
"""
POST /actor
Create a new actor
Body:
{
"actor_type:" <type of actor>,
"args" : { "name": <name of actor>, <actor argument>:<value>, ... }
"deploy_args" : {"app_id": <application id>, "app_name": <application name>} (optional)
}
Response: {"actor_id": <actor-id>}
"""
re_post_new_actor = re.compile(r"POST /actor\sHTTP/1")
control_api_doc += \
"""
GET /actors
Get list of actors on this runtime
Response: list of actor ids
"""
re_get_actors = re.compile(r"GET /actors\sHTTP/1")
control_api_doc += \
"""
GET /actor/{actor-id}
Get information on actor
Response:
{
"inports": list inports
"node_id": <node-id>,
"type": <actor type>,
"name": <actor name>,
"outports": list of outports
}
"""
re_get_actor = re.compile(r"GET /actor/((ACTOR_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\sHTTP/1")
control_api_doc += \
"""
DELETE /actor/{actor-id}
Delete actor
Response: {"result": "OK"}
"""
re_del_actor = re.compile(r"DELETE /actor/((ACTOR_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\sHTTP/1")
control_api_doc += \
"""
GET /actor/{actor-id}/report
    Some actors store statistics on inputs and outputs; this endpoint reports them. Not always present.
    Response: Depends on actor
"""
re_get_actor_report = re.compile(r"GET /actor/((ACTOR_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/report\sHTTP/1")
control_api_doc += \
"""
POST /actor/{actor-id}/migrate
Migrate actor to (other) node
Body: {"peer_node_id": <node-id>}
Response: {"result": "ACK"}
"""
re_post_actor_migrate = re.compile(r"POST /actor/((ACTOR_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/migrate\sHTTP/1")
control_api_doc += \
"""
POST /actor/{actor-id}/disable
DEPRECATED. Disables an actor
Response: {"result": "OK"}
"""
re_post_actor_disable = re.compile(r"POST /actor/((ACTOR_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/disable\sHTTP/1")
# control_api_doc += \
"""
GET /actor/{actor-id}/port/{port-id}
Broken. Get information on port {port-id} of actor {actor-id}
"""
re_get_port = re.compile(r"GET /actor/((ACTOR_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/port/((PORT_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\sHTTP/1")
control_api_doc += \
"""
POST /connect
Connect actor ports
Body:
{
"actor_id" : <actor-id>,
"port_name": <port-name>,
"port_dir": <in/out>,
"peer_node_id": <node-id>,
"peer_actor_id": <actor-id>,
"peer_port_name": <port-name>,
"peer_port_dir": <out/in>
}
Response: {"result": "OK"}
"""
re_post_connect = re.compile(r"POST /connect\sHTTP/1")
control_api_doc += \
"""
POST /set_port_property
Sets a property of the port. Currently only fanout on outports is supported.
Body:
{
"actor_id" : <actor-id>,
"port_type": <in/out>,
"port_name": <port-name>,
"port_property": <property-name>
"value" : <property value>
}
Response: {"result": "OK"}
"""
re_set_port_property = re.compile(r"POST /set_port_property\sHTTP/1")
control_api_doc += \
"""
POST /deploy
Compile and deploy a calvin script to this calvin node
Body:
{
"name": <application name>,
"script": <calvin script>
}
Response: {"application_id": <application-id>}
"""
re_post_deploy = re.compile(r"POST /deploy\sHTTP/1")
control_api_doc += \
"""
POST /disconnect
    Disconnect a port. If the port fields are empty, all ports of the actor are disconnected
Body:
{
"actor_id": <actor-id>,
"port_name": <port-name>,
"port_dir": <in/out>,
"port_id": <port-id>
}
Response: {"result": "OK"}
"""
re_post_disconnect = re.compile(r"POST /disconnect\sHTTP/1")
control_api_doc += \
"""
DELETE /node
Stop (this) calvin node
Response: {"result": "OK"}
"""
re_delete_node = re.compile(r"DELETE /node\sHTTP/1")
control_api_doc += \
"""
POST /index/{key}
Store value under index key
Body:
{
"value": <string>
}
Response: {"result": "true"}
"""
re_post_index = re.compile(r"POST /index/([0-9a-zA-Z\.\-/]*)\sHTTP/1")
control_api_doc += \
"""
DELETE /index/{key}
Remove value from index key
Body:
{
"value": <string>
}
Response: {"result": "true"}
"""
re_delete_index = re.compile(r"DELETE /index/([0-9a-zA-Z\.\-/]*)\sHTTP/1")
control_api_doc += \
"""
GET /index/{key}
Fetch values under index key
Response: {"result": <list of strings>}
"""
re_get_index = re.compile(r"GET /index/([0-9a-zA-Z\.\-/]*)\sHTTP/1")
_calvincontrol = None
def get_calvincontrol():
""" Returns the CalvinControl singleton
"""
global _calvincontrol
if _calvincontrol is None:
_calvincontrol = CalvinControl()
return _calvincontrol
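# Illustrative startup sketch (the node object and the control URI are hypothetical):
#
#     control = get_calvincontrol()
#     control.start(node, "http://localhost:5001")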
class CalvinControl(object):
""" A HTTP REST API for calvin nodes
"""
def __init__(self):
self.node = None
self.log_connection = None
self.routes = None
self.server = None
self.connections = {}
def start(self, node, uri):
""" Start listening and handle request on uri
"""
self.port = int(urlparse(uri).port)
self.host = urlparse(uri).hostname
_log.info("Listening on: %s:%s" % (self.host, self.port))
self.node = node
# Set routes for requests
self.routes = [
(re_get_log, self.handle_get_log),
(re_get_node_id, self.handle_get_node_id),
(re_get_nodes, self.handle_get_nodes),
(re_get_node, self.handle_get_node),
(re_post_peer_setup, self.handle_peer_setup),
(re_get_applications, self.handle_get_applications),
(re_get_application, self.handle_get_application),
(re_del_application, self.handle_del_application),
(re_post_new_actor, self.handle_new_actor),
(re_get_actors, self.handle_get_actors),
(re_get_actor, self.handle_get_actor),
(re_del_actor, self.handle_del_actor),
(re_get_actor_report, self.handle_get_actor_report),
(re_post_actor_migrate, self.handle_actor_migrate),
(re_post_actor_disable, self.handle_actor_disable),
(re_get_port, self.handle_get_port),
(re_post_connect, self.handle_connect),
(re_set_port_property, self.handle_set_port_property),
(re_post_deploy, self.handle_deploy),
(re_delete_node, self.handle_quit),
(re_post_disconnect, self.handle_disconnect),
(re_post_index, self.handle_post_index),
(re_delete_index, self.handle_delete_index),
(re_get_index, self.handle_get_index)
]
self.server = server_connection.ServerProtocolFactory(self.handle_request, "raw")
self.server.start(self.host, self.port)
def stop(self):
""" Stop
"""
self.server.stop()
def handle_request(self, actor_ids=None):
""" Handle incoming requests
"""
if self.server.pending_connections:
addr, conn = self.server.accept()
self.connections[addr] = conn
for handle, connection in self.connections.items():
if connection.data_available:
data = connection.data_get()
found = False
for route in self.routes:
match = route[0].match(data)
if match:
http_data = data.split("\r\n\r\n")[1]
if http_data:
http_data = json.loads(http_data)
_log.debug("Calvin control handles:\n%s\n---------------" % data)
route[1](handle, connection, match, http_data)
found = True
break
if not found:
_log.error("No route found for: %s" % data)
self.send_response(
handle, connection, "HTTP/1.0 404 Not Found\r\n")
def send_response(self, handle, connection, data):
""" Send response header text/html
"""
if not connection.connection_lost:
connection.send("HTTP/1.0 200 OK\n"
+ "Content-Type: application/json\n"
+
"Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS\n"
+ "Access-Control-Allow-Origin: *\r\n"
+ "\n")
connection.send(data)
connection.close()
del self.connections[handle]
def send_streamheader(self, connection):
""" Send response header for text/event-stream
"""
if not connection.connection_lost:
connection.send("HTTP/1.0 200 OK\n"
+ "Content-Type: text/event-stream\n"
+ "Access-Control-Allow-Origin: *\r\n"
+ "\n")
def storage_cb(self, key, value, handle, connection):
self.send_response(handle, connection, json.dumps(value))
def handle_get_log(self, handle, connection, match, data):
""" Get log stream
"""
self.log_connection = connection
self.send_streamheader(connection)
def handle_get_node_id(self, handle, connection, match, data):
""" Get node id from this node
"""
self.send_response(
handle, connection, json.dumps({'id': self.node.id}))
def handle_peer_setup(self, handle, connection, match, data):
self.node.peersetup(data['peers'])
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_get_nodes(self, handle, connection, match, data):
""" Get active nodes
"""
self.send_response(
handle, connection, json.dumps(self.node.network.list_links()))
def handle_get_node(self, handle, connection, match, data):
""" Get node information from id
"""
self.node.storage.get_node(match.group(1), CalvinCB(
func=self.storage_cb, handle=handle, connection=connection))
def handle_get_applications(self, handle, connection, match, data):
""" Get applications
"""
self.send_response(
handle, connection, json.dumps(self.node.app_manager.list_applications()))
def handle_get_application(self, handle, connection, match, data):
""" Get application from id
"""
self.node.storage.get_application(match.group(1), CalvinCB(
func=self.storage_cb, handle=handle, connection=connection))
def handle_del_application(self, handle, connection, match, data):
""" Delete application from id
"""
self.node.app_manager.destroy(match.group(1))
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_new_actor(self, handle, connection, match, data):
""" Create actor
"""
actor_id = self.node.new(actor_type=data['actor_type'], args=data[
'args'], deploy_args=data['deploy_args'])
self.send_response(
handle, connection, json.dumps({'actor_id': actor_id}))
def handle_get_actors(self, handle, connection, match, data):
""" Get actor list
"""
actors = self.node.am.list_actors()
self.send_response(
handle, connection, json.dumps(actors))
def handle_get_actor(self, handle, connection, match, data):
""" Get actor from id
"""
self.node.storage.get_actor(match.group(1), CalvinCB(
func=self.storage_cb, handle=handle, connection=connection))
def handle_del_actor(self, handle, connection, match, data):
""" Delete actor from id
"""
self.node.am.destroy(match.group(1))
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_get_actor_report(self, handle, connection, match, data):
""" Get report from actor
"""
self.send_response(
handle, connection, json.dumps(self.node.am.report(match.group(1))))
def handle_actor_migrate(self, handle, connection, match, data):
""" Migrate actor
"""
self.node.am.migrate(match.group(1), data['peer_node_id'],
callback=CalvinCB(self.actor_migrate_cb, handle, connection))
def actor_migrate_cb(self, handle, connection, status, *args, **kwargs):
""" Migrate actor respons
"""
self.send_response(handle, connection, json.dumps({'result': status}))
def handle_actor_disable(self, handle, connection, match, data):
self.node.am.disable(match.group(1))
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_get_port(self, handle, connection, match, data):
""" Get port from id
"""
self.node.storage.get_port(match.group(2), CalvinCB(
func=self.storage_cb, handle=handle, connection=connection))
def handle_connect(self, handle, connection, match, data):
""" Connect port
"""
if "actor_id" not in data:
data["actor_id"] = None
if "port_name" not in data:
data["port_name"] = None
if "port_dir" not in data:
data["port_dir"] = None
if "port_id" not in data:
data["port_id"] = None
if "peer_node_id" not in data:
data["peer_node_id"] = None
if "peer_actor_id" not in data:
data["peer_actor_id"] = None
if "peer_port_name" not in data:
data["peer_port_name"] = None
if "peer_port_dir" not in data:
data["peer_port_dir"] = None
if "peer_port_id" not in data:
data["peer_port_id"] = None
self.node.connect(
actor_id=data["actor_id"],
port_name=data["port_name"],
port_dir=data["port_dir"],
port_id=data["port_id"],
peer_node_id=data["peer_node_id"],
peer_actor_id=data["peer_actor_id"],
peer_port_name=data["peer_port_name"],
peer_port_dir=data["peer_port_dir"],
peer_port_id=data["peer_port_id"])
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_set_port_property(self, handle, connection, match, data):
self.node.am.set_port_property(
actor_id=data["actor_id"],
port_type=data["port_type"],
port_name=data["port_name"],
port_property=data["port_property"],
value=data["value"])
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_deploy(self, handle, connection, match, data):
print "data: ", data
app_info, errors, warnings = compiler.compile(
data["script"], filename=data["name"])
app_info["name"] = data["name"]
d = deployer.Deployer(
runtime=None, deployable=app_info, node_info=None, node=self.node)
app_id = d.deploy()
self.send_response(
handle, connection, json.dumps({'application_id': app_id}))
def handle_quit(self, handle, connection, match, data):
self.node.stop()
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_disconnect(self, handle, connection, match, data):
self.node.disconnect(
data['actor_id'], data['port_name'], data['port_dir'], data['port_id'])
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_post_index(self, handle, connection, match, data):
""" Add to index
"""
self.node.storage.add_index(
match.group(1), data['value'], cb=CalvinCB(self.index_cb, handle, connection))
def handle_delete_index(self, handle, connection, match, data):
""" Remove from index
"""
self.node.storage.remove_index(
match.group(1), data['value'], cb=CalvinCB(self.index_cb, handle, connection))
def handle_get_index(self, handle, connection, match, data):
""" Get from index
"""
self.node.storage.get_index(
match.group(1), cb=CalvinCB(self.get_index_cb, handle, connection))
def index_cb(self, handle, connection, *args, **kwargs):
""" Index operation response
"""
_log.debug("index cb (in control) %s, %s" % (args, kwargs))
if 'value' in kwargs:
value = kwargs['value']
else:
value = None
self.send_response(handle, connection, json.dumps({'result': value}))
def get_index_cb(self, handle, connection, key, value, *args, **kwargs):
""" Index operation response
"""
_log.debug("get index cb (in control) %s, %s" % (key, value))
self.send_response(handle, connection, json.dumps({'result': value}))
def log_firing(self, actor_name, action_method, tokens_produced, tokens_consumed, production):
""" Trace firing, sends data on log_sock
"""
if self.log_connection is not None:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
data = {}
data['timestamp'] = st
data['node_id'] = self.node.id
data['type'] = 'fire'
data['actor'] = actor_name
data['action_method'] = action_method
data['produced'] = tokens_produced
data['consumed'] = tokens_consumed
self.log_connection.send("data: %s\n\n" % json.dumps(data))
|
apache-2.0
| -3,372,114,627,838,210,600
| 32.836834
| 195
| 0.575651
| false
| 3.404031
| false
| false
| false
|
lyzardiar/RETools
|
PublicTools/bin/tools/packLuaJit.py
|
1
|
1726
|
#coding=utf-8
import os
import os.path
import shutil
import sys
import getopt
import string
import fnmatch
import md5
import hashlib
import zipfile
import time
import threading
import struct
from pprint import pprint
from struct import *
projectdir = os.path.dirname(os.path.realpath(__file__))
compileBin = os.path.join(projectdir, "bin/compile_scripts.bat")
def iter_find_files(path, fnexp):
for root, dirs, files, in os.walk(path):
for filename in fnmatch.filter(files, fnexp):
yield os.path.join(root, filename)
def work_file(filepath):
filepath = os.path.realpath(filepath)
cmd = compileBin + (" -i %s -o %s -m files -jit" % (filepath, filepath))
os.system(cmd)
def work_async(tardir):
cmd = compileBin + (" -i %s -o %s -m files -jit" % (tardir, tardir))
os.system(cmd)
# for filename in iter_find_files(tardir, "*.*"):
# if filename.find(".lua") != -1:
# work_file(filename)
# pass
def work():
if len(sys.argv) > 1:
inputFile = sys.argv[1]
for i in range(1, len(sys.argv)):
filepath = os.path.realpath(sys.argv[i])
if os.path.isdir(filepath):
work_async(filepath)
else:
work_file(filepath)
else:
curdir = r"C:\WorkSpace\Public\TX\Android\Versions\config2"
curdir = r"C:\WorkSpace\Public\TX\Android\Versions\Ver0.1.0.34809_encode"
curdir = r"C:\WorkSpace\Public\TX\Android\markVersion_35742-35779_2"
work_async(curdir)
os.system("pause")
if __name__ == '__main__':
work()
# try:
# work()
# except Exception, e:
# print Exception, e
|
mit
| 5,455,863,543,919,101,000
| 26.854839
| 81
| 0.598494
| false
| 3.351456
| false
| false
| false
|
CIGIHub/greyjay
|
greyjay/themes/models.py
|
1
|
4991
|
from __future__ import absolute_import, division, unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.wagtailadmin.edit_handlers import (
FieldPanel,
InlinePanel,
MultiFieldPanel,
)
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailcore.models import Page
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsnippets.edit_handlers import SnippetChooserPanel
from wagtail.wagtailsnippets.models import register_snippet
from . import utils
@python_2_unicode_compatible
class ThemeContent(ClusterableModel):
name = models.CharField(max_length=255)
contact_email = models.EmailField(
blank=True,
null=True,
help_text="Only provide if this should be different from the site default email contact address.",
)
default = models.BooleanField(default=False)
panels = [
FieldPanel('name'),
FieldPanel('contact_email'),
FieldPanel('default'),
InlinePanel('block_links', label="Content Blocks"),
InlinePanel('follow_links', label="Follow Links"),
InlinePanel('logo_links', label="Logos"),
]
def __str__(self):
return self.name
register_snippet(ThemeContent)
@python_2_unicode_compatible
class Theme(models.Model):
name = models.CharField(max_length=1024)
folder = models.CharField(max_length=1024, default="themes/default")
content = models.ForeignKey(ThemeContent, null=True)
def __str__(self):
return self.name
panels = [
FieldPanel('name'),
FieldPanel('folder'),
SnippetChooserPanel('content'),
]
register_snippet(Theme)
class ThemeablePage(Page):
'''
    Abstract model class to inherit from for themeable pages
'''
is_creatable = False
class Meta:
abstract = True
theme = models.ForeignKey(
Theme,
on_delete=models.SET_NULL,
blank=True,
null=True,
)
def get_template(self, request, *args, **kwargs):
original_template = super(ThemeablePage, self).get_template(request, *args, **kwargs)
if self.theme is None:
return original_template
custom_template = utils.get_themed_template_name(self.theme, original_template)
if utils.template_exists(custom_template):
return custom_template
return original_template
style_panels = [
MultiFieldPanel(
[
SnippetChooserPanel('theme'),
],
heading="Theme"
),
]
@python_2_unicode_compatible
class TextBlock(models.Model):
name = models.CharField(max_length=255)
usage = models.CharField(max_length=255, blank=True, default="")
heading = models.TextField(blank=True, default="")
content = RichTextField(blank=True, default="")
panels = [
FieldPanel('name'),
FieldPanel('heading'),
FieldPanel('content'),
FieldPanel('usage'),
]
def __str__(self):
return self.name
register_snippet(TextBlock)
@python_2_unicode_compatible
class FollowLink(models.Model):
name = models.CharField(max_length=255)
usage = models.CharField(max_length=255, blank=True, default="")
link = models.CharField(max_length=1024)
panels = [
FieldPanel('name'),
FieldPanel('link'),
FieldPanel('usage'),
]
def __str__(self):
return self.name
register_snippet(FollowLink)
@python_2_unicode_compatible
class LogoBlock(models.Model):
name = models.CharField(max_length=255)
usage = models.CharField(max_length=255, blank=True, default="")
logo = models.ForeignKey(
'images.AttributedImage',
)
link = models.CharField(max_length=2048, blank=True, null=True)
panels = [
FieldPanel('name'),
ImageChooserPanel('logo'),
FieldPanel('link'),
FieldPanel('usage'),
]
def __str__(self):
return self.name
register_snippet(LogoBlock)
class ContentBlockLink(models.Model):
block = models.ForeignKey(
"TextBlock",
related_name='content_links'
)
theme_content = ParentalKey(
"ThemeContent",
related_name='block_links'
)
panels = [SnippetChooserPanel("block")]
class ContentFollowLink(models.Model):
block = models.ForeignKey(
"FollowLink",
related_name='content_links'
)
theme_content = ParentalKey(
"ThemeContent",
related_name='follow_links'
)
panels = [SnippetChooserPanel("block")]
class ContentLogoLink(models.Model):
block = models.ForeignKey(
"LogoBlock",
related_name='content_links'
)
theme_content = ParentalKey(
"ThemeContent",
related_name='logo_links'
)
panels = [SnippetChooserPanel("block")]
|
mit
| -7,877,395,620,834,723,000
| 23.707921
| 106
| 0.651773
| false
| 3.9928
| false
| false
| false
|
devilry/devilry-django
|
devilry/devilry_frontpage/cradminextensions/listbuilder/listbuilder_role.py
|
1
|
5167
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils.translation import pgettext_lazy
from cradmin_legacy import crapp
from cradmin_legacy.crinstance import reverse_cradmin_url
from cradmin_legacy.viewhelpers import listbuilder
from devilry.devilry_cradmin import devilry_listbuilder
class AbstractRoleItemValue(listbuilder.itemvalue.TitleDescription):
valuealias = 'user'
def get_devilryrole(self):
raise NotImplementedError()
def get_extra_css_classes_list(self):
return [
'devilry-frontpage-listbuilder-roleselect-itemvalue',
'devilry-frontpage-listbuilder-roleselect-itemvalue-{}'.format(self.get_devilryrole()),
]
class StudentRoleItemValue(AbstractRoleItemValue):
"""
Listbuilder ItemValue renderer for information about the student devilryrole.
"""
def get_devilryrole(self):
return 'student'
def get_title(self):
return pgettext_lazy('role', 'Student')
def get_description(self):
return pgettext_lazy('roleselect',
'Upload deliveries or see your delivery and feedback history.')
class ExaminerRoleItemValue(AbstractRoleItemValue):
"""
Listbuilder ItemValue renderer for information about the examiner devilryrole.
"""
def get_devilryrole(self):
return 'examiner'
def get_title(self):
return pgettext_lazy('role', 'Examiner')
def get_description(self):
return pgettext_lazy('roleselect',
'Give students feedback on their deliveries as examiner.')
class AnyAdminRoleItemValue(AbstractRoleItemValue):
"""
Listbuilder ItemValue renderer for information about the anyadmin devilryrole.
"""
def get_devilryrole(self):
return 'anyadmin'
def get_title(self):
return pgettext_lazy('role', 'Administrator')
def get_description(self):
return pgettext_lazy('roleselect',
'Manage departments, courses, semesters and assignments.')
class AbstractRoleItemFrame(devilry_listbuilder.common.GoForwardLinkItemFrame):
valuealias = 'user'
def get_url(self):
raise NotImplementedError()
def get_devilryrole(self):
raise NotImplementedError()
def get_extra_css_classes_list(self):
return [
'devilry-frontpage-listbuilder-roleselect-itemframe',
'devilry-frontpage-listbuilder-roleselect-itemframe-{}'.format(self.get_devilryrole()),
]
class StudentRoleItemFrame(AbstractRoleItemFrame):
"""
Listbuilder ItemFrame renderer for the student devilryrole.
"""
def get_devilryrole(self):
return 'student'
def get_url(self):
return reverse_cradmin_url(
instanceid='devilry_student',
appname='dashboard',
roleid=None,
viewname=crapp.INDEXVIEW_NAME)
class ExaminerRoleItemFrame(AbstractRoleItemFrame):
"""
Listbuilder ItemFrame renderer for the examiner devilryrole.
"""
def get_devilryrole(self):
return 'examiner'
def get_url(self):
return reverse_cradmin_url(
instanceid='devilry_examiner',
appname='assignmentlist',
roleid=None,
viewname=crapp.INDEXVIEW_NAME)
class AnyAdminRoleItemFrame(AbstractRoleItemFrame):
"""
Listbuilder ItemFrame renderer for the anyadmin devilryrole.
"""
def get_devilryrole(self):
return 'anyadmin'
def get_url(self):
return reverse_cradmin_url(
instanceid='devilry_admin',
appname='overview',
roleid=None,
viewname=crapp.INDEXVIEW_NAME)
class RoleSelectList(listbuilder.lists.RowList):
def __init__(self, user):
super(RoleSelectList, self).__init__()
self.user = user
self.__build_list()
def __append_student_item(self):
item = StudentRoleItemFrame(inneritem=StudentRoleItemValue(value=self.user))
self.append(item)
def __append_examiner_item(self):
item = ExaminerRoleItemFrame(inneritem=ExaminerRoleItemValue(value=self.user))
self.append(item)
def __append_anyadmin_item(self):
item = AnyAdminRoleItemFrame(inneritem=AnyAdminRoleItemValue(value=self.user))
self.append(item)
def __build_list(self):
user_model = get_user_model()
self.user_is_student = user_model.objects.user_is_student(self.user)
self.user_is_examiner = user_model.objects.user_is_examiner(self.user)
self.user_is_anyadmin = user_model.objects.user_is_admin_or_superuser(self.user)
self.user_has_no_roles = True
if self.user_is_student:
self.__append_student_item()
self.user_has_no_roles = False
if self.user_is_examiner:
self.__append_examiner_item()
self.user_has_no_roles = False
if self.user_is_anyadmin:
self.__append_anyadmin_item()
self.user_has_no_roles = False
def get_extra_css_classes_list(self):
return ['devilry-frontpage-roleselectlist']
|
bsd-3-clause
| 8,373,252,969,505,380,000
| 30.315152
| 99
| 0.660538
| false
| 3.827407
| false
| false
| false
|
serkansokmen/qn
|
qn/wsgi.py
|
1
|
2213
|
"""
WSGI config for qn project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
#import sys
#import site
#import subprocess
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + "../../")
# Add the virtualenv packages to the site directory. This uses the technique
# described at http://code.google.com/p/modwsgi/wiki/VirtualEnvironments
# Remember original sys.path.
#prev_sys_path = list(sys.path)
# Get the path to the env's site-packages directory
#site_packages = subprocess.check_output([
# os.path.join(PROJECT_ROOT, '.virtualenv/bin/python'),
# '-c',
# 'from distutils.sysconfig import get_python_lib;'
# 'print get_python_lib(),'
#]).strip()
# Add the virtualenv site-packages to the site packages
#site.addsitedir(site_packages)
# Reorder sys.path so the new directories are at the front.
#new_sys_path = []
#for item in list(sys.path):
# if item not in prev_sys_path:
# new_sys_path.append(item)
# sys.path.remove(item)
#sys.path[:0] = new_sys_path
# Add the app code to the path
#sys.path.append(PROJECT_ROOT)
os.environ['CELERY_LOADER'] = 'django'
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "qn.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
bsd-3-clause
| 1,427,026,455,742,831,900
| 34.126984
| 79
| 0.729327
| false
| 3.80895
| false
| false
| false
|
chiara-paci/costruttoridimondi
|
costruttoridimondi/functional_tests/test_sharing.py
|
1
|
1528
|
from selenium import webdriver
from . import pages,base
class SharingTest(base.MultiuserFunctionalTest):
def test_logged_in_users_stories_are_saved_as_my_stories(self):
# Edith is a logged-in user
self.set_browser('edith@example.com',size=(700,900),position=(0,0))
# Her friend Oniciferous is also hanging out on the stories site
oni_browser = self.create_user_browser_with_session('oniciferous@example.com',size=(700,900),position=(700,0))
# Edith goes to the home page and starts a list
e_story_page = pages.HomePage(self).start_new_story('Get help')
# She notices a "Share this story" option
        share_box = e_story_page.get_share_box()
self.assertEqual(share_box.get_attribute('placeholder'),
'your-friend@example.com')
# She shares her story.
# The page updates to say that it's shared with Oniciferous:
e_story_page.share_story_with('oniciferous@example.com')
self.set_browser('oniciferous@example.com')
#self.browser = oni_browser
        mystory_page = pages.HomePage(self).go_to_home_page().click_on_mystories_link()
o_story_page=mystory_page.click_on_story_link('Get help')
self.wait_for(lambda: self.assertEqual(
o_story_page.get_story_owner(),
'edith@example.com'
))
o_story_page.add_section('Hi Edith!')
self.set_browser('edith@example.com')
o_story_page.wait_for_new_section_in_story('Hi Edith!', 2)
|
gpl-3.0
| 7,599,141,757,490,930,000
| 36.268293
| 120
| 0.644634
| false
| 3.336245
| false
| false
| false
|
jpancoast/aws-py-tools
|
checkSGForOutboundAll.py
|
1
|
1424
|
#!/usr/bin/env python
import sys
import signal
import boto.ec2
import operator
import getopt
from lib.AWSPyTools import ParseOptions
from lib.AWSPyTools import AWSPyTools
def main(argv):
signal.signal(signal.SIGINT, signal_handler)
po = ParseOptions(sys.argv)
(env, region, vpc_id) = po.getAwsOptions()
debug = False
awsPyTools = AWSPyTools(
region=region, environment=env, loadFromFile=False, debug=debug)
envDataString = "Running in environment: " + env + ", region: " + region
if vpc_id is not None:
envDataString += ", vpc_id: " + vpc_id
print envDataString
sgs = awsPyTools.getAllSecurityGroups(vpc_id=vpc_id)
for sgName in sgs:
sg = sgs[sgName]
if len(sg.rules_egress) > 0:
for rule in sg.rules_egress:
for grant in rule.grants:
if (rule.from_port is None or rule.from_port == 'None') and (rule.to_port is None or rule.to_port == 'None') and (rule.ip_protocol == '-1') and (str(grant.cidr_ip) == '0.0.0.0/0'):
print str(sg.name) + " (" + sg.id + ") has OUTBOUND ALL, so I'm removing that rule"
print ""
awsPyTools.revokeSGRule('egress', sg, rule, grant)
def signal_handler(signal, frame):
print sys.argv[0] + " exited via keyboard interrupt."
sys.exit(0)
if __name__ == "__main__":
main(sys.argv)
|
gpl-2.0
| -3,898,750,051,795,106,300
| 26.921569
| 200
| 0.605337
| false
| 3.406699
| false
| false
| false
|
barentsen/dave
|
diffimg/fastpsffit.py
|
1
|
4310
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 19 16:39:13 2018
A much faster PRF fitter, with the caveat that the psf model is hardcoded.
psffit.py can fit an arbitrary PSF model to an image. The cost of this flexibility
is that it must perform numerical integration to calculate the flux in each pixel.
This is slow. On my test machine, a 10x12 image takes 20ms to compute.
Since by far the most common model to fit is that of a symmetric Gaussian function with
a constant sky background, and this model can be computed quite quickly, this module
enables this special case to be run much faster. On the same machine, the same image
can be computed in 95.7us, or a x200 speed up. There's still more speed up to
be had if you make a Model() class that assigns memory for the model once and overwrites
it each time instead of computing from scratch in each call.
The downside is that none of the code is shared with the general purpose code.
Efforts to use numba don't seem to help much for some reason
The only two public methods are
* fastGaussianPrfFit
* computeModel
@author: fergal
"""
from __future__ import print_function
from __future__ import division
from pdb import set_trace as debug
import scipy.optimize as spOpt
from numba.types import CPointer, float64, intc
from scipy.special import erf
import numpy as np
def fastGaussianPrfFit(img, guess):
"""Fit a Symmetric Gaussian PSF to an image, really quickly
Inputs
--------
img
(2d numpy array) Image to fit
prfFunc
(function) Model to fit. See module level documentation for more details.
guess
(tuple or array) Elements are
* col0, row0
Location of PSF centroid
* sigma
Width of gaussian
* flux
Height of gaussian. Beware this is not normalized
* sky
Background level
Returns
------------
    A scipy.optimize.OptimizeResult object. The .x attribute contains the best fit parameters
"""
assert len(guess) == 5
mask = None
soln = spOpt.minimize(costFunc, guess,args=(img,mask), method='Nelder-Mead', bounds=None)
return soln
def costFunc(arglist, img, mask=None):
"""Compute difference between image and its model for given model params
Inputs
----------
arglist
(tuple or array) Tunable parameters of model
img
(2d np array) Image to fit
Optional Inputs
----------------
mask
(2d np array) Zero elements of mask indicate bad data which should not be
included in the fit
Returns
----------
float
"""
nr, nc = img.shape
model = computeModel(nc, nr, arglist)
diff = img - model
if mask is not None:
assert np.all( mask.shape == img.shape)
diff[~mask] = 0
img[~mask] = 0 #In case bad values are set to Nan
cost = np.sqrt( np.sum(diff**2) )
return cost
def computeModel(numCols, numRows, arglist):
"""Compute model flux for an image with size (numCols, numRows)
Inputs
-------
numCols, numRows
(ints) Shape of the image to compute the model PRF for
arglist
(tuple or array) Tunable parameters of the model
Returns
----------
A 2d numpy array representing the model PRF image.
"""
model = np.zeros( (numRows, numCols) )
xc = np.arange(numCols)
xr = np.arange(numRows)
cols, rows = np.meshgrid(xc, xr)
model = analytic_gaussian_integral(cols, rows, *arglist)
return model
def analytic_gaussian_integral(col, row, col0, row0, sigma0, flux0, sky):
z_col1 = .5 * (col - col0) / sigma0
z_col2 = .5 * (col+1 - col0) / sigma0
z_row1 = .5 * (row - row0) / sigma0
z_row2 = .5 * (row+1 - row0) / sigma0
flux = flux0
flux *= phi(z_col2) - phi(z_col1)
flux *= phi(z_row2) - phi(z_row1)
flux += sky
return flux
#Precompute for speed
sqrt2 = np.sqrt(2)
def phi(z):
"""Compute integral of gaussian function in the range (-Inf, z],
`z` is defined as (x - x0) / sigma, where x0 is the central value of the Gaussian.
See `scipy.special.erf` for details
"""
return .5 * ( 1 + erf(z/sqrt2) )
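# Illustrative usage sketch; the image size, PSF parameters and initial guess
# below are made-up values:
#
#     img = computeModel(12, 10, (6.0, 5.0, 1.2, 1000.0, 50.0))  # col0, row0, sigma, flux, sky
#     soln = fastGaussianPrfFit(img, (6.2, 4.8, 1.0, 900.0, 40.0))
#     bestfit_params = soln.x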
|
mit
| 5,773,724,286,923,327,000
| 24.502959
| 93
| 0.639675
| false
| 3.705933
| false
| false
| false
|
caedesvvv/b2rex
|
scripts/b2rexpkg/charexporter.py
|
1
|
8874
|
"""
RealXtend character exporter
"""
import os
import b2rexpkg
from b2rexpkg.siminfo import GridInfo
from b2rexpkg.simconnection import SimConnection
from b2rexpkg.ogre_exporter import OgreExporter
from b2rexpkg.hooks import reset_uuids
from ogrepkg.base import indent
from ogrepkg.armatureexport import GetArmatureObject
from ogremeshesexporter import ArmatureAnimationProxyManager, ArmatureAnimationProxy
import Blender
class CharacterExporter(object):
action_uuids = {'Walk': '6ed24bd8-91aa-4b12-ccc7-c97c857ab4e0',
'CrouchWalk': "47f5f6fb-22e5-ae44-f871-73aaaf4a6022",
'Fly': "aec4610c-757f-bc4e-c092-c6e9caf18daf",
"HoverDown": "20f063ea-8306-2562-0b07-5c853b37b31e",
"HoverUp": "62c5de58-cb33-5743-3d07-9e4cd4352864",
"Hover": "4ae8016b-31b9-03bb-c401-b1ea941db41d",
"Run": "05ddbff8-aaa9-92a1-2b74-8fe77a29b445",
"Sit": "1a5fe8ac-a804-8a5d-7cbd-56bd83184568",
"SitGround": "1c7600d6-661f-b87b-efe2-d7421eb93c86",
"Stand": "2408fe9e-df1d-1d7d-f4ff-1384fa7b350f"}
def __init__(self):
# rest
self.gridinfo = GridInfo()
self.sim = SimConnection()
self.ogre = OgreExporter()
self.settings = {}
self.actions_map = {}
for name in self.action_uuids:
self.actions_map[name.lower()] = name
def connect(self, base_url):
"""
Connect to an opensim instance
"""
self.gridinfo.connect(base_url)
print self.sim.connect(base_url)
def test(self):
"""
Api tests
"""
print self.gridinfo.getGridInfo()["gridnick"]
regions = self.gridinfo.getRegions()
for id in regions:
region = regions[id]
print " *", region["name"], region["x"], region["y"], id
# xmlrpc
print self.sim.login("caedes", "caedes", "pass")
print self.sim.sceneClear("d9d1b302-5049-452d-b176-3a9561189ca4",
"cube")
print self.sim.sceneUpload("d9d1b302-5049-452d-b176-3a9561189ca4",
"cube",
"/home/caedes/groupmembers.zip")
def writeAnimation(self, f, id, name, internal_name):
"""
Write an animation to the avatar file
"""
f.write(indent(1)+'<animation name="'+name+'" ')
f.write('id="'+id+'" internal_name="'+internal_name+'" ')
f.write('looped="1" speedfactor="1.0" ')
if 'walk' in name.lower() or 'run' in name.lower():
f.write('usevelocity="1" ')
f.write('fadein="0.25" ')
f.write('fadeout="0.25" />\n')
def writeAnimations(self, f):
"""
Write all animations to the avatar file
"""
actions = Blender.Armature.NLA.GetActions()
for name, action in actions.items():
if action.name.lower() in self.actions_map:
action_name = self.actions_map[action.name.lower()]
action_uuid = self.action_uuids[action_name]
else:
action_name = action.name
action_uuid = 'not-needed' # has to exist according to manual
self.writeAnimation(f,
action_uuid,
action_name,
action.name)
def writeProperty(self, f, name, value):
"""
Write an avatar property
"""
f.write(indent(1) + '<property name="'+name+'" value="'+value+'" />')
def writeProperties(self, f):
"""
Write all properties
"""
if self.settings['MovementSpeed']:
self.writeProperty(f, 'MovementSpeed', self.settings['MovementSpeed']) # needed??
# automatic ground offset:
# bone which should be adjusted to align with the ground
if self.settings['basebone']:
self.writeProperty(f, 'basebone', self.settings['basebone'])
# avatar skeleton's hierarchy root
if self.settings['rootbone']:
self.writeProperty(f, 'rootbone', self.settings['rootbone'])
# finetuning
if self.settings['baseoffset']:
self.writeProperty(f, 'baseoffset', self.settings['baseoffset'])
        return
        # NOTE: the early return above means the head-turning properties below
        # are never written; the block is kept for reference.
        # parametrized head turning:
if self.settings['headbone']:
self.writeProperty(f, 'headbone', '')
if self.settings['neckbone']:
self.writeProperty(f, 'neckbone', '')
if self.settings['torsobone']:
self.writeProperty(f, 'torsobone', '')
if self.settings['headboneaxis']:
self.writeProperty(f, 'headboneaxis', '') # optional
if self.settings['neckboneaxis']:
self.writeProperty(f, 'neckboneaxis', '') # optional
if self.settings['torsoboneaxis']:
self.writeProperty(f, 'torsoboneaxis', '') # optional
def writeAvatarFile(self, f):
"""
Write an avatar file for the selected mesh.
"""
f.write('<?xml version="1.0" encoding="utf-8" ?>\n')
f.write('<avatar>\n')
f.write(indent(1)+'<version>0.2</version>\n')
f.write(indent(1)+'<base name="default_female" mesh="'+self.settings['mesh_file']+'" />\n')
f.write(indent(1)+'<skeleton name="'+self.settings['skeleton_file']+'" />\n')
#f.write(indent(1)+'<material name="male/Body" />\n')
#f.write(indent(1)+'<material name="male/Face" />\n')
first_face_image = self.getMesh().getData(0, True).faces[0].image
if first_face_image:
texture_name = os.path.basename(first_face_image.getFilename())
else:
texture_name = ''
f.write(indent(1)+'<texture_body name="'+texture_name+'" />\n')
#f.write(indent(1)+'<texture_face name="" />\n')
f.write(indent(1)+'<appearance height="1.800000" weight="1" />\n')
f.write(indent(1)+'<transformation position="%s" rotation="%s" \
scale="%s" />\n' % (self.settings['translation'],
self.settings['rotation'],
self.settings['scale']))
self.writeProperties(f)
self.writeAnimations(f)
f.write('</avatar>')
def createAvatarFile(self, path):
"""
Create the avatar file at the specified location.
"""
character_name = self.settings['character_name']
f = open(os.path.join(path, character_name + '.xml'), 'w')
self.writeAvatarFile(f)
f.close()
def getMesh(self):
"""
Get the selected mesh
"""
selected = Blender.Object.GetSelected()
for sel in selected:
if sel.getType() == 'Mesh':
return sel
def getArmature(self):
"""
Get the selected object's armature
"""
bObject = self.getMesh()
return GetArmatureObject(bObject)
def parseSettings(self, exportSettings):
"""
Decide settings for export
"""
mesh = self.getMesh()
name = mesh.getData(0, True).name
armature_name = self.getArmature().name
self.settings['character_name'] = mesh.name
self.settings['mesh_file'] = name + '.mesh'
self.settings['skeleton_file'] = armature_name + '.skeleton'
self.settings.update(exportSettings.getDict())
def setupAnimations(self):
"""
Setup animations on the ogre exporter.
"""
ogreSelection = self.ogre.meshapp.selectedObjectManager
ogreSelection.updateSelection()
armatureManager = ogreSelection.getArmatureAnimationProxyManager(self.getMesh().getData(True))
armatureManager.removeProxies() # cleanup
armatureManager.animationProxyKeyList = [] # shouldnt be needed
armatureManager.update()
actionList = armatureManager.getActions()
for action in actionList:
bAction = action.bAction
anim = ArmatureAnimationProxy(armatureManager, action,
action.getName(),
action.getFirstFrame(),
action.getLastFrame())
armatureManager.addProxy(anim)
armatureManager.savePackageSettings()
def export(self, path, pack_name, offset, exportSettings):
"""
Export the character and its avatar file.
"""
b2rexpkg.start()
self.setupAnimations()
self.ogre.export(path, pack_name, offset)
self.parseSettings(exportSettings)
self.createAvatarFile(path)
#f = open(os.path.join(path, pack_name + ".uuids"), 'w')
#b2rexpkg.write(f)
#f.close()
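# --- Hedged usage sketch (not part of the original module) ---
# A minimal sketch of driving CharacterExporter from another Blender 2.4x
# script, based only on the methods defined above. The path, pack name and
# grid URL are illustrative placeholders, and exportSettings is assumed to
# be an object exposing getDict() as parseSettings() expects.
#
#   exporter = CharacterExporter()
#   exporter.connect("http://localhost:9000")            # optional, for grid info
#   exporter.export("/tmp/avatar_export", "my_avatar",   # writes mesh + avatar xml
#                   offset=None, exportSettings=my_settings)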
|
lgpl-3.0
| -3,429,808,671,769,813,000
| 37.751092
| 102
| 0.564683
| false
| 3.733277
| false
| false
| false
|
ARM-software/bob-build
|
tests/source_encapsulation/gen_fun3.py
|
1
|
2535
|
#!/usr/bin/env python
# Copyright 2020 Arm Limited.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import sys
def check_expected_input(input_files, expected_files):
if len(input_files) != len(expected_files):
print("Length mismatch! Input: {} Expected: {}".format(input_files, expected_files))
sys.exit(1)
for exp in expected_files:
found = False
for inp in input_files:
if inp.endswith(exp):
found = True
break
if not found:
print("Missed expected file '{}' within input {}".format(exp, input_files))
sys.exit(1)
def main():
parser = argparse.ArgumentParser(description='''Check whether provided input files match the \
expected ones. Generate fun3.c using input \
from funcs.txt''')
parser.add_argument('--in', dest='input', nargs='+', default=[], required=True,
help='Input file list')
parser.add_argument('--expected', dest='expected', default=[], nargs='+',
required=True, help='Expected input file list')
parser.add_argument('--out', dest='output', action='store', required=True, help='Output file',
type=argparse.FileType('wt'))
args = parser.parse_args()
s = '''
#define FUNCS "%(funcs)s"
int fun3(void)
{
return 0;
}
'''.lstrip()
check_expected_input(args.input, args.expected)
try:
for f in args.input:
filename = os.path.basename(f)
if filename == "funcs.txt":
with open(f, 'r') as infile:
d = {'funcs': infile.read()}
args.output.write((s % d) + '\n')
except IOError as e:
print("Input file couldn't be opened: " + str(e))
sys.exit(1)
if __name__ == "__main__":
main()
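# --- Hedged usage sketch (not part of the original script) ---
# The script is normally invoked by the build system; a hand-run invocation
# would look roughly like the following (file names are illustrative):
#
#   python gen_fun3.py --in out/gen/funcs.txt out/gen/other.txt \
#                      --expected funcs.txt other.txt \
#                      --out fun3.c
#
# It checks that every --expected name matches the tail of some --in path,
# then emits fun3.c with FUNCS defined from the contents of funcs.txt.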
|
apache-2.0
| -6,791,542,741,674,078,000
| 32.355263
| 98
| 0.589349
| false
| 4.155738
| false
| false
| false
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/quests/125_IntheNameofEvilPart1/__init__.py
|
1
|
6260
|
import sys
from com.l2scoria.gameserver.datatables import SkillTable
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "125_IntheNameofEvilPart1"
# NPCs
MUSHIKA = 32114
KARAKAWEI = 32117
ULU_KAIMU = 32119
BALU_KAIMU = 32120
CHUTA_KAIMU = 32121
# ITEMS
GAZKH_FRAGMENT = 8782
ORNITHOMIMUS_CLAW = 8779
DEINONYCHUS_BONE = 8780
EPITAPH_OF_WISDOM = 8781
# MOBS
ORNITHOMIMUS = [ 22200,22201,22202,22219,22224,22742,22744 ]
DEINONYCHUS = [ 16067,22203,22204,22205,22220,22225,22743,22745 ]
# DROP
DROP_CHANCE = 30
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = [GAZKH_FRAGMENT,ORNITHOMIMUS_CLAW,DEINONYCHUS_BONE,EPITAPH_OF_WISDOM]
def onAdvEvent(self, event, npc, player) :
htmltext = event
st = player.getQuestState(qn)
if not st : return
cond = st.getInt("cond")
if event == "32114-05.htm" :
st.setState(STARTED)
st.set("cond","1")
st.playSound("ItemSound.quest_accept")
elif event == "32114-09.htm" and cond == 1 :
st.set("cond","2")
st.giveItems(GAZKH_FRAGMENT,1)
st.playSound("ItemSound.quest_middle")
elif event == "32117-08.htm" and cond == 2 :
st.set("cond","3")
st.playSound("ItemSound.quest_middle")
elif event == "32117-14.htm" and cond == 4 :
st.set("cond","5")
st.playSound("ItemSound.quest_middle")
elif event == "32119-02.htm" :
st.set("pilar1","0")
elif cond == 5 and event.isdigit() :
correct = st.getInt("pilar1")
st.set("pilar1", str(correct+1))
htmltext = "32119-0"+str(int(event)+2)+".htm"
elif event == "32119-06.htm" and cond == 5 :
if st.getInt("pilar1") < 4 :
htmltext = "32119-00.htm"
st.unset("pilar1")
elif event == "32119-14.htm" and cond == 5 :
st.set("cond","6")
st.playSound("ItemSound.quest_middle")
elif event == "32120-02.htm" :
st.set("pilar2","0")
elif cond == 6 and event.isdigit() :
correct = st.getInt("pilar2")
st.set("pilar2", str(correct+1))
htmltext = "32120-0"+str(int(event)+2)+".htm"
elif event == "32120-06.htm" and cond == 6 :
if st.getInt("pilar2") < 4 :
htmltext = "32120-00.htm"
st.unset("pilar2")
elif event == "32120-15.htm" and cond == 6 :
st.set("cond","7")
st.playSound("ItemSound.quest_middle")
elif event == "32121-02.htm" :
st.set("pilar3","0")
elif cond == 7 and event.isdigit() :
correct = st.getInt("pilar3")
st.set("pilar3", str(correct+1))
htmltext = "32121-0"+str(int(event)+2)+".htm"
elif event == "32121-06.htm" and cond == 7 :
if st.getInt("pilar3") < 4 :
htmltext = "32121-00.htm"
st.unset("pilar3")
elif event == "32121-16.htm" and cond == 7 :
st.set("cond","8")
st.takeItems(GAZKH_FRAGMENT,-1)
st.giveItems(EPITAPH_OF_WISDOM,1)
st.playSound("ItemSound.quest_middle")
return htmltext
def onTalk (self, npc, player) :
htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
cond = st.getInt("cond")
npcId = npc.getNpcId()
if npcId == MUSHIKA :
first = player.getQuestState("124_MeetingTheElroki")
if st.getState() == COMPLETED :
htmltext = "<html><body>This quest has already been completed.</body></html>"
elif first and first.getState().getName() == 'Completed' and st.getState() == CREATED and player.getLevel() >= 76 :
htmltext = "32114-01.htm"
elif cond == 0 :
htmltext = "32114-00.htm"
elif cond == 1 :
htmltext = "32114-07.htm"
elif cond == 2 :
htmltext = "32114-10.htm"
elif cond >= 3 and cond < 8:
htmltext = "32114-11.htm"
elif cond == 8 :
st.addExpAndSp(859195,86603)
st.unset("cond")
st.unset("pilar1")
st.unset("pilar2")
st.unset("pilar3")
st.setState(COMPLETED)
st.exitQuest(False)
st.playSound("ItemSound.quest_finish")
htmltext = "32114-12.htm"
elif npcId == KARAKAWEI :
if cond == 2 :
htmltext = "32117-01.htm"
elif cond == 3 :
htmltext = "32117-09.htm"
elif cond == 4 :
st.takeItems(ORNITHOMIMUS_CLAW,-1)
st.takeItems(DEINONYCHUS_BONE,-1)
st.playSound("ItemSound.quest_middle")
htmltext = "32117-10.htm"
elif cond == 5 :
htmltext = "32117-15.htm"
elif cond == 6 or cond == 7 :
htmltext = "32117-16.htm"
elif cond == 8 :
htmltext = "32117-17.htm"
elif npcId == ULU_KAIMU :
if cond == 5 :
npc.doCast(SkillTable.getInstance().getInfo(5089,1))
htmltext = "32119-01.htm"
elif cond == 6 :
htmltext = "32119-14.htm"
elif npcId == BALU_KAIMU :
if cond == 6 :
npc.doCast(SkillTable.getInstance().getInfo(5089,1))
htmltext = "32120-01.htm"
elif cond == 7 :
htmltext = "32120-16.htm"
elif npcId == CHUTA_KAIMU :
if cond == 7 :
npc.doCast(SkillTable.getInstance().getInfo(5089,1))
htmltext = "32121-01.htm"
elif cond == 8 :
htmltext = "32121-17.htm"
return htmltext
def onKill(self, npc, player, isPet) :
st = player.getQuestState(qn)
if not st : return
if st.getInt("cond") == 3 :
if npc.getNpcId() in ORNITHOMIMUS :
if st.getQuestItemsCount(ORNITHOMIMUS_CLAW) < 2 :
if st.getRandom(100) < DROP_CHANCE :
st.giveItems(ORNITHOMIMUS_CLAW,1)
st.playSound("ItemSound.quest_itemget")
elif npc.getNpcId() in DEINONYCHUS :
if st.getQuestItemsCount(DEINONYCHUS_BONE) < 2 :
if st.getRandom(100) < DROP_CHANCE :
st.giveItems(DEINONYCHUS_BONE,1)
st.playSound("ItemSound.quest_itemget")
if st.getQuestItemsCount(ORNITHOMIMUS_CLAW) == 2 and st.getQuestItemsCount(DEINONYCHUS_BONE) == 2 :
st.set("cond","4")
st.playSound("ItemSound.quest_middle")
return
QUEST = Quest(125,qn,"The Name of Evil - 1")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(MUSHIKA)
QUEST.addTalkId(MUSHIKA)
QUEST.addTalkId(KARAKAWEI)
QUEST.addTalkId(ULU_KAIMU)
QUEST.addTalkId(BALU_KAIMU)
QUEST.addTalkId(CHUTA_KAIMU)
for i in ORNITHOMIMUS :
QUEST.addKillId(i)
for i in DEINONYCHUS :
QUEST.addKillId(i)
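# --- Hedged note (not part of the original script) ---
# Worked example of the drop logic in onKill(): with DROP_CHANCE = 30 each
# qualifying kill yields an item with probability 0.30, and two items are
# needed per mob family, so on average roughly 2 / 0.30 ~= 6.7 kills of
# Ornithomimus and of Deinonychus are required to advance cond 3 -> 4.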
|
gpl-3.0
| 3,013,677,245,302,723,600
| 29.691176
| 118
| 0.652236
| false
| 2.452978
| false
| false
| false
|
wannabeCitizen/projectsystem
|
lib/verify.py
|
1
|
2667
|
"""
For handling permission and verification requests
"""
import json
import datetime
from lib.model import User, Organization, MiniOrganization, IdeaMeta, Project
#Checks if someone is an owner of an organization they are trying to modify
def is_owner(org_id, user_id):
my_org = Organization.objects.get(unique=org_id)
if user_id in my_org.owners:
return True
return False
#Checks if the organization is open, if not, is the member allowed to do this?
def can_add(org_id, user_id):
my_org = Organization.objects.get(unique=org_id)
if my_org.open_org == True:
return True
else:
if user_id in my_org.owners:
return True
return False
#Checks if user is in organization
def is_in_org(user_id, org_id):
if is_owner(org_id, user_id):
return True
my_org = Organization.objects.get(unique=org_id)
if user_id in my_org.members:
return True
return False
def is_idea_owner(idea_id, user_id):
my_idea = IdeaMeta.objects.get(unique=idea_id)
if my_idea.created_by == user_id:
return True
else:
return False
def is_thinker(user_id, idea_id, version_id):
my_idea = IdeaMeta.objects.get(unique=idea_id)
    my_version = None
    for versions in my_idea.versions:
        if versions.unique == version_id:
            my_version = versions
    # Guard against a version_id that does not exist on this idea
    if my_version is not None and my_version.thinker == user_id:
        return True
    else:
        return False
def is_commenter(user_id, idea_id, comment_id):
my_idea = IdeaMeta.objects.get(unique=idea_id)
my_commenter = my_idea.comments[comment_id].commenter
if my_commenter == user_id:
return True
else:
return False
def is_replier(user_id, idea_id, comment_id, reply_id):
my_idea = IdeaMeta.objects.get(unique=idea_id)
my_replier = my_idea.comments[comment_id].replies[reply_id].replier
if my_replier == user_id:
return True
else:
return False
def is_project_member(user_id, project_id):
my_project = Project.objects.get(unique=project_id)
if user_id in my_project.members:
return True
return False
def is_project_commenter(user_id, project_id, comment_id):
my_project = Project.objects.get(unique=project_id)
my_commenter = my_project.comments[comment_id].commenter.google_id
if my_commenter == user_id:
return True
else:
return False
def is_project_replier(user_id, project_id, comment_id, reply_id):
my_project = Project.objects.get(unique=project_id)
my_replier = my_project.comments[comment_id].replies[reply_id].replier.google_id
if my_replier == user_id:
return True
else:
return False
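# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of guarding a request handler with the checks above.
# The handler below is hypothetical; only is_owner()/is_in_org() come from
# this module.
def _example_guarded_update(org_id, user_id):
    # Only owners may modify the organization; plain members get read access.
    if is_owner(org_id, user_id):
        return "allowed: owner may modify"
    if is_in_org(user_id, org_id):
        return "read-only: member but not owner"
    return "denied: not part of this organization"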
|
mit
| 8,657,110,029,647,846,000
| 29.306818
| 84
| 0.665542
| false
| 3.260391
| false
| false
| false
|
credativ/pulp
|
server/test/unit/server/webservices/views/test_consumers.py
|
1
|
76384
|
import json
import unittest
import mock
from django.http import HttpResponseBadRequest
from base import assert_auth_CREATE, assert_auth_DELETE, assert_auth_READ, assert_auth_UPDATE
from pulp.server.exceptions import (InvalidValue, MissingResource, MissingValue,
OperationPostponed, UnsupportedValue)
from pulp.server.managers.consumer import bind
from pulp.server.managers.consumer import profile
from pulp.server.managers.consumer import query
from pulp.server.webservices.views import consumers
from pulp.server.webservices.views import util
from pulp.server.webservices.views.consumers import (ConsumersView, ConsumerBindingsView,
ConsumerBindingResourceView,
ConsumerBindingSearchView,
ConsumerContentActionView,
ConsumerContentApplicabilityView,
ConsumerContentApplicRegenerationView,
ConsumerHistoryView, ConsumerProfilesView,
ConsumerProfileResourceView,
ConsumerProfileSearchView,
ConsumerResourceView,
ConsumerResourceContentApplicRegenerationView,
ConsumerSearchView,
UnitInstallSchedulesView,
UnitInstallScheduleResourceView)
class Test_expand_consumers(unittest.TestCase):
"""
Test that using query params will expand proper consumer info.
"""
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_bind_manager')
def test_expand_consumers(self, mock_factory, mock_serial):
"""
Test for consumer info expansion with details/bindings
"""
consumers_list = [{'id': 'c1'}]
bindings = [{'consumer_id': 'c1', 'repo_id': 'repo1', 'distributor_id': 'dist1'}]
mock_factory.return_value.find_by_criteria.return_value = bindings
mock_serial.serialize.return_value = {'consumer_id': 'c1', 'repo_id': 'repo1',
'distributor_id': 'dist1',
'_href': '/some/c1/some_bind/'}
cons = consumers.expand_consumers(True, False, consumers_list)
expected_cons = [{'id': 'c1', 'bindings': [{'consumer_id': 'c1', 'repo_id': 'repo1',
'distributor_id': 'dist1', '_href': '/some/c1/some_bind/'}]}]
self.assertEqual(cons, expected_cons)
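# --- Hedged note (not part of the original tests) ---
# Shape illustration for expand_consumers(): given the details/bindings flags
# and a list of consumer dicts, each consumer gains a 'bindings' key holding
# the serialized bindings found for it, e.g.
#   [{'id': 'c1'}]  ->  [{'id': 'c1', 'bindings': [<serialized binding>, ...]}]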
class TestConsumersView(unittest.TestCase):
"""
Test consumers view.
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.expand_consumers')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_query_manager')
def test_get_all_consumers(self, mock_factory, mock_resp, mock_expand):
"""
Test the consumers retrieval.
"""
consumer_mock = mock.MagicMock()
resp = [{'id': 'foo', 'display_name': 'bar'}]
consumer_mock.find_all.return_value = resp
mock_factory.return_value = consumer_mock
mock_expand.return_value = resp
request = mock.MagicMock()
request.GET = {}
consumers = ConsumersView()
response = consumers.get(request)
expected_cont = [{'id': 'foo', 'display_name': 'bar', '_href': '/v2/consumers/foo/'}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_all_consumers_details_true(self, mock_factory, mock_resp, mock_serial):
"""
Test the consumers retrieval and include details.
"""
consumer_mock = mock.MagicMock()
resp = [{'id': 'foo', 'display_name': 'bar'}]
consumer_mock.find_all.return_value = resp
mock_factory.consumer_query_manager.return_value = consumer_mock
mock_serial.serialize.return_value = []
mock_factory.consumer_bind_manager.return_value.find_by_criteria.return_value = []
request = mock.MagicMock()
request.GET = {'details': 'true'}
consumers = ConsumersView()
response = consumers.get(request)
expected_cont = [{'id': 'foo', 'display_name': 'bar', '_href': '/v2/consumers/foo/',
'bindings': []}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_all_consumers_details_false(self, mock_factory, mock_resp):
"""
Test the consumers retrieval and exclude details
"""
consumer_mock = mock.MagicMock()
resp = [{'id': 'foo', 'display_name': 'bar'}]
consumer_mock.find_all.return_value = resp
mock_factory.consumer_query_manager.return_value = consumer_mock
request = mock.MagicMock()
request.GET = {'details': 'false'}
consumers = ConsumersView()
response = consumers.get(request)
expected_cont = [{'id': 'foo', 'display_name': 'bar', '_href': '/v2/consumers/foo/'}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_all_consumers_bindings_true(self, mock_factory, mock_resp, mock_serial):
"""
Test the consumers retrieval and include bindings
"""
consumer_mock = mock.MagicMock()
resp = [{'id': 'foo', 'display_name': 'bar'}]
consumer_mock.find_all.return_value = resp
mock_factory.consumer_query_manager.return_value = consumer_mock
mock_serial.serialize.return_value = []
mock_factory.consumer_bind_manager.return_value.find_by_criteria.return_value = []
request = mock.MagicMock()
request.GET = {'bindings': 'true'}
consumers = ConsumersView()
response = consumers.get(request)
expected_cont = [{'id': 'foo', 'display_name': 'bar', '_href': '/v2/consumers/foo/',
'bindings': []}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_all_consumers_bindings_false(self, mock_factory, mock_resp):
"""
Test the consumers retrieval and exclude bindings
"""
consumer_mock = mock.MagicMock()
resp = [{'id': 'foo', 'display_name': 'bar'}]
consumer_mock.find_all.return_value = resp
mock_factory.consumer_query_manager.return_value = consumer_mock
request = mock.MagicMock()
request.GET = {'bindings': 'false'}
consumers = ConsumersView()
response = consumers.get(request)
expected_cont = [{'id': 'foo', 'display_name': 'bar', '_href': '/v2/consumers/foo/'}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_all_consumers_bindings_not_boolean(self, mock_factory, mock_resp):
"""
Test the consumers retrieval with invalid boolean query param
"""
consumer_mock = mock.MagicMock()
resp = [{'id': 'foo', 'display_name': 'bar'}]
consumer_mock.find_all.return_value = resp
mock_factory.consumer_query_manager.return_value = consumer_mock
request = mock.MagicMock()
request.GET = {'bindings': 'not_boolean'}
consumers = ConsumersView()
response = consumers.get(request)
expected_cont = [{'id': 'foo', 'display_name': 'bar', '_href': '/v2/consumers/foo/'}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.generate_redirect_response')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_create_consumer(self, mock_factory, mock_resp, mock_redirect):
"""
Test consumer creation.
"""
cons = {'id': 'foo', 'display_name': 'bar'}
cert = '12345'
expected_cont = {'consumer': {'id': 'foo', 'display_name': 'bar',
'_href': '/v2/consumers/foo/'}, 'certificate': '12345'}
request = mock.MagicMock()
request.body = json.dumps({'id': 'foo', 'display_name': 'bar'})
mock_factory.return_value.register.return_value = cons, cert
consumers = ConsumersView()
response = consumers.post(request)
mock_resp.assert_called_once_with(expected_cont)
mock_redirect.assert_called_once_with(mock_resp.return_value,
expected_cont['consumer']['_href'])
self.assertTrue(response is mock_redirect.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
def test_create_consumer_missing_param(self):
"""
Test consumer creation with missing required id.
"""
request = mock.MagicMock()
request.body = json.dumps({'display_name': 'bar'})
consumers = ConsumersView()
try:
response = consumers.post(request)
except MissingValue, response:
pass
else:
raise AssertionError("MissingValue should be raised with missing options")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['id'])
class TestConsumerResourceView(unittest.TestCase):
"""
Test consumer resource view.
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
@mock.patch('pulp.server.webservices.views.consumers.generate_json_response')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_delete_consumer_resource(self, mock_factory, mock_resp):
"""
Test consumer delete resource.
"""
mock_consumer_manager = mock.MagicMock()
mock_factory.consumer_manager.return_value = mock_consumer_manager
mock_consumer_manager.unregister.return_value = None
request = mock.MagicMock()
consumer_resource = ConsumerResourceView()
response = consumer_resource.delete(request, 'test-consumer')
mock_consumer_manager.unregister.assert_called_once_with('test-consumer')
mock_resp.assert_called_once_with(None)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_get_consumer_resource(self, mock_collection, mock_resp):
"""
Test single consumer retrieval.
"""
mock_collection.return_value.get_consumer.return_value = {'id': 'foo'}
request = mock.MagicMock()
request.GET = {}
consumer_resource = ConsumerResourceView()
response = consumer_resource.get(request, 'foo')
expected_cont = {'id': 'foo', '_href': '/v2/consumers/foo/'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_consumer_resource_with_details(self, mock_factory, mock_resp, mock_serial):
"""
Test single consumer retrieval with query param details true
"""
mock_factory.consumer_manager.return_value.get_consumer.return_value = {'id': 'foo'}
mock_serial.serialize.return_value = []
mock_factory.consumer_bind_manager.return_value.find_by_criteria.return_value = []
request = mock.MagicMock()
request.GET = {'details': 'true'}
consumer_resource = ConsumerResourceView()
response = consumer_resource.get(request, 'foo')
expected_cont = {'id': 'foo', '_href': '/v2/consumers/foo/', 'bindings': []}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_consumer_resource_with_bindings(self, mock_factory, mock_resp, mock_serial):
"""
Test single consumer retrieval with query param bindings true
"""
mock_factory.consumer_manager.return_value.get_consumer.return_value = {'id': 'foo'}
mock_serial.serialize.return_value = []
mock_factory.consumer_bind_manager.return_value.find_by_criteria.return_value = []
request = mock.MagicMock()
request.GET = {'bindings': 'true'}
consumer_resource = ConsumerResourceView()
response = consumer_resource.get(request, 'foo')
expected_cont = {'id': 'foo', '_href': '/v2/consumers/foo/', 'bindings': []}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_get_consumer_resource_with_details_false(self, mock_collection, mock_resp):
"""
Test single consumer retrieval with query param details false
"""
mock_collection.return_value.get_consumer.return_value = {'id': 'foo'}
request = mock.MagicMock()
request.GET = {'details': 'false'}
consumer_resource = ConsumerResourceView()
response = consumer_resource.get(request, 'foo')
expected_cont = {'id': 'foo', '_href': '/v2/consumers/foo/'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_get_consumer_resource_with_bindings_false(self, mock_collection, mock_resp):
"""
Test single consumer retrieval with query param bindings false
"""
mock_collection.return_value.get_consumer.return_value = {'id': 'foo'}
request = mock.MagicMock()
        request.GET = {'bindings': 'false'}
consumer_resource = ConsumerResourceView()
response = consumer_resource.get(request, 'foo')
expected_cont = {'id': 'foo', '_href': '/v2/consumers/foo/'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_UPDATE())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_update_consumer(self, mock_factory, mock_resp):
"""
Test consumer update.
"""
resp = {'id': 'foo', 'display_name': 'bar'}
expected_cont = {'id': 'foo', 'display_name': 'bar', '_href': '/v2/consumers/foo/'}
request = mock.MagicMock()
request.body = json.dumps({'delta': {'display_name': 'bar'}})
mock_factory.consumer_manager.return_value.update.return_value = resp
consumer_resource = ConsumerResourceView()
response = consumer_resource.put(request, 'foo')
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
class TestConsumerSearchView(unittest.TestCase):
"""
Test the ConsumerSearchView.
"""
def test_class_attributes(self):
"""
Ensure that the ConsumerSearchView has the correct class attributes.
"""
self.assertEqual(ConsumerSearchView.response_builder,
util.generate_json_response_with_pulp_encoder)
self.assertEqual(ConsumerSearchView.optional_bool_fields, ('details', 'bindings'))
self.assertTrue(isinstance(ConsumerSearchView.manager, query.ConsumerQueryManager))
@mock.patch('pulp.server.webservices.views.consumers.add_link')
@mock.patch('pulp.server.webservices.views.consumers.expand_consumers')
def test_get_results(self, mock_expand, mock_add_link):
"""
Test that results are expanded and serialized.
"""
query = mock.MagicMock()
search_method = mock.MagicMock()
mock_expand.return_value = ['result_1', 'result_2']
options = {'mock': 'options'}
consumer_search = ConsumerSearchView()
serialized_results = consumer_search.get_results(query, search_method, options)
mock_expand.assert_called_once_with(False, False, list(search_method.return_value))
mock_add_link.assert_has_calls([mock.call('result_1'), mock.call('result_2')])
self.assertEqual(serialized_results, mock_expand.return_value)
class TestConsumerBindingsView(unittest.TestCase):
"""
Represents consumers binding.
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_consumer_bindings(self, mock_factory, mock_resp, mock_serial):
"""
Test all bindings retrieval
"""
mock_factory.consumer_manager.return_value.get_consumer.return_value = {'id': 'foo'}
bindings = [{'repo_id': 'some-repo', 'consumer_id': 'foo'}]
mock_factory.consumer_bind_manager.return_value.find_by_consumer.return_value = bindings
serial_resp = {'consumer_id': 'foo', 'repo_id': 'some-repo',
'_href': '/v2/consumers/foo/bindings/some-repo/dist1/'}
mock_serial.serialize.return_value = serial_resp
request = mock.MagicMock()
consumer_bindings = ConsumerBindingsView()
response = consumer_bindings.get(request, 'foo')
expected_cont = [{'consumer_id': 'foo', 'repo_id': 'some-repo',
'_href': '/v2/consumers/foo/bindings/some-repo/dist1/'}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
class TestConsumerBindingSearchView(unittest.TestCase):
"""
Test the ConsumerBindingSearchView.
"""
def test_class_attributes(self):
"""
Ensure that the ConsumerBindingSearchView has the correct class attributes.
"""
self.assertEqual(ConsumerBindingSearchView.response_builder,
util.generate_json_response_with_pulp_encoder)
self.assertTrue(isinstance(ConsumerBindingSearchView.manager, bind.BindManager))
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_consumer_bindings_by_repoid(self, mock_factory, mock_resp, mock_serial):
"""
Test all bindings retrieval by repo-id
"""
mock_factory.consumer_manager.return_value.get_consumer.return_value = {'id': 'foo'}
bindings = [{'repo_id': 'some-repo', 'consumer_id': 'foo'}]
mock_factory.consumer_bind_manager.return_value.find_by_consumer.return_value = bindings
mock_factory.repo_query_manager.return_value.find_by_id.return_value = 'some-repo'
serial_resp = {'consumer_id': 'foo', 'repo_id': 'some-repo',
'_href': '/v2/consumers/foo/bindings/some-repo/'}
mock_serial.serialize.return_value = serial_resp
request = mock.MagicMock()
consumer_bindings = ConsumerBindingsView()
response = consumer_bindings.get(request, 'foo', 'some-repo')
expected_cont = [{'consumer_id': 'foo', '_href': '/v2/consumers/foo/bindings/some-repo/',
'repo_id': 'some-repo'}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_consumer_bindings_invalid_consumer(self, mock_factory):
"""
Test all bindings retrieval invalid consumer
"""
mock_factory.consumer_manager.return_value.get_consumer.side_effect = MissingResource()
request = mock.MagicMock()
consumer_bindings = ConsumerBindingsView()
try:
response = consumer_bindings.get(request, 'nonexistent_id')
except MissingResource, response:
pass
else:
raise AssertionError("MissingResource should be raised with nonexistent consumer_id")
self.assertEqual(response.http_status_code, 404)
self.assertEqual(response.error_data['resources'], {'consumer_id': 'nonexistent_id'})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_consumer_bindings_invalid_repo(self, mock_factory):
"""
Test all bindings retrieval invalid repo
"""
mock_factory.repo_query_manager.return_value.find_by_id.return_value = None
request = mock.MagicMock()
consumer_bindings = ConsumerBindingsView()
try:
response = consumer_bindings.get(request, 'foo', 'some-repo')
except MissingResource, response:
pass
else:
raise AssertionError("MissingResource should be raised with nonexistent repo_id")
self.assertEqual(response.http_status_code, 404)
self.assertEqual(response.error_data['resources'], {'repo_id': 'some-repo'})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.consumer_task.bind')
def test_create_binding_async(self, mock_bind):
"""
Test bind consumer to a repo async task.
"""
request = mock.MagicMock()
request.body = json.dumps({'repo_id': 'xxx', 'distributor_id': 'yyy'})
consumer_bindings = ConsumerBindingsView()
self.assertRaises(OperationPostponed, consumer_bindings.post, request, 'test-consumer')
mock_bind.assert_called_once_with('test-consumer', 'xxx', 'yyy', True, {}, {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.consumer_task.bind')
def test_create_binding_sync(self, mock_bind, mock_resp):
"""
Test bind consumer to a repo sync task(notify_agent is false)
"""
mock_bind.return_value.spawned_tasks = False
mock_bind.return_value.serialize.return_value = {'mock': 'bind'}
request = mock.MagicMock()
request.body = json.dumps({'repo_id': 'xxx', 'distributor_id': 'yyy',
'notify_agent': 'false'})
consumer_bindings = ConsumerBindingsView()
response = consumer_bindings.post(request, 'foo')
expected_cont = {'mock': 'bind'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
mock_bind.assert_called_once_with('foo', 'xxx', 'yyy', 'false', {}, {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
def test_create_binding_with_invalid_binding_config(self):
"""
Test bind consumer to a repo witn invalid binding_config
"""
request = mock.MagicMock()
request.body = json.dumps({'binding_config': []})
consumer_bindings = ConsumerBindingsView()
try:
response = consumer_bindings.post(request, 'test-consumer')
except InvalidValue, response:
pass
else:
raise AssertionError("InvalidValue should be raised with wrong type binding config")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['binding_config'])
class TestConsumerBindingResourceView(unittest.TestCase):
"""
Represents consumers binding resource.
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_bind_manager')
def test_get_consumer_binding_resource(self, mock_factory, mock_resp, mock_serial):
"""
Test retrieve single binding
"""
bind_resp = {'repo_id': 'some-repo', 'consumer_id': 'foo'}
mock_factory.return_value.get_bind.return_value = bind_resp
serial_resp = {'consumer_id': 'foo', 'repo_id': 'some-repo',
'_href': '/v2/consumers/foo/bindings/some-repo/dist1/'}
mock_serial.serialize.return_value = serial_resp
request = mock.MagicMock()
consumer_binding = ConsumerBindingResourceView()
response = consumer_binding.get(request, 'foo', 'some-repo', 'dist1')
expected_cont = {'consumer_id': 'foo', 'repo_id': 'some-repo',
'_href': '/v2/consumers/foo/bindings/some-repo/dist1/'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
@mock.patch('pulp.server.webservices.views.consumers.consumer_task.unbind')
def test_delete_binding_async_no_force(self, mock_unbind):
"""
Test consumer binding removal async no force
"""
mock_unbind.return_value.spawned_tasks = True
request = mock.MagicMock()
request.body = json.dumps({})
unbind_view = ConsumerBindingResourceView()
self.assertRaises(OperationPostponed, unbind_view.delete, request,
"consumer_id", "repo_id", "distributor_id")
mock_unbind.assert_called_once_with("consumer_id", "repo_id", "distributor_id", {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
@mock.patch('pulp.server.webservices.views.consumers.consumer_task.force_unbind')
def test_delete_binding_async_yes_force(self, mock_unbind):
"""
Test consumer binding removal async with force.
"""
request = mock.MagicMock()
request.body = json.dumps({'force': True})
unbind_view = ConsumerBindingResourceView()
self.assertRaises(OperationPostponed, unbind_view.delete, request,
"consumer_id", "repo_id", "distributor_id")
mock_unbind.assert_called_once_with("consumer_id", "repo_id", "distributor_id", {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.consumer_task.unbind')
def test_delete_binding_sync_no_force(self, mock_unbind, mock_resp):
"""
Test consumer binding removal sync no force
"""
mock_unbind.return_value.spawned_tasks = False
mock_unbind.return_value.serialize.return_value = {'mock': 'unbind'}
request = mock.MagicMock()
request.body = json.dumps({})
unbind_view = ConsumerBindingResourceView()
response = unbind_view.delete(request, 'foo', 'some-repo', 'dist1')
expected_cont = {'mock': 'unbind'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
mock_unbind.assert_called_once_with('foo', 'some-repo', 'dist1', {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.consumer_task.force_unbind')
def test_delete_binding_sync_yes_force(self, mock_unbind, mock_resp):
"""
Test consumer binding removal sync with force
"""
mock_unbind.return_value.spawned_tasks = False
mock_unbind.return_value.serialize.return_value = {'mock': 'force-unbind'}
request = mock.MagicMock()
request.body = json.dumps({'force': True})
unbind_view = ConsumerBindingResourceView()
response = unbind_view.delete(request, 'foo', 'some-repo', 'dist1')
expected_cont = {'mock': 'force-unbind'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
mock_unbind.assert_called_once_with('foo', 'some-repo', 'dist1', {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
def test_delete_binding_invalid_force_type(self):
"""
Test consumer binding removal with invalid type force
"""
request = mock.MagicMock()
request.body = json.dumps({'force': []})
unbind_view = ConsumerBindingResourceView()
try:
response = unbind_view.delete(request, 'foo', 'some-repo', 'dist1')
except InvalidValue, response:
pass
else:
raise AssertionError("InvalidValue should be raised with wrong type force param")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['force'])
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
def test_delete_binding_invalid_options_type(self):
"""
Test consumer binding removal with invalid type options
"""
request = mock.MagicMock()
request.body = json.dumps({'options': []})
unbind_view = ConsumerBindingResourceView()
try:
response = unbind_view.delete(request, 'foo', 'some-repo', 'dist1')
except InvalidValue, response:
pass
else:
raise AssertionError("InvalidValue should be raised with wrong type options param")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['options'])
class TestConsumerContentActionView(unittest.TestCase):
"""
Test Consumer content manipulation.
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
def test_consumer_bad_request_content(self):
"""
Test consumer invalid content action.
"""
request = mock.MagicMock()
request.body = json.dumps('')
consumer_content = ConsumerContentActionView()
response = consumer_content.post(request, 'my-consumer', 'no_such_action')
self.assertTrue(isinstance(response, HttpResponseBadRequest))
self.assertEqual(response.status_code, 400)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_consumer_content_install_missing_cons(self, mock_consumer):
"""
Test consumer content installation with missing consumer
"""
mock_consumer.return_value.get_consumer.side_effect = MissingResource()
request = mock.MagicMock()
request.body = json.dumps({"units": [], "options": {}})
consumer_content = ConsumerContentActionView()
try:
response = consumer_content.post(request, 'my-consumer', 'install')
except MissingResource, response:
pass
else:
raise AssertionError('MissingResource should be raised with missing consumer')
self.assertEqual(response.http_status_code, 404)
self.assertEqual(response.error_data['resources'], {'consumer_id': 'my-consumer'})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_consumer_content_install_missing_units(self, mock_consumer):
"""
Test consumer content installation with missing units param
"""
mock_consumer.return_value.get_consumer.return_value = 'my-consumer'
request = mock.MagicMock()
request.body = json.dumps({'options': {}})
consumer_content = ConsumerContentActionView()
try:
response = consumer_content.post(request, 'my-consumer', 'install')
except MissingValue, response:
pass
else:
raise AssertionError('MissingValue should be raised with missing units param')
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['units'])
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_consumer_content_install_missing_options(self, mock_consumer):
"""
Test consumer content installation with missing options param
"""
mock_consumer.return_value.get_consumer.return_value = 'my-consumer'
request = mock.MagicMock()
request.body = json.dumps({'units': []})
consumer_content = ConsumerContentActionView()
try:
response = consumer_content.post(request, 'my-consumer', 'install')
except MissingValue, response:
pass
else:
raise AssertionError('MissingValue should be raised with missing options param')
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['options'])
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_agent_manager')
def test_consumer_content_install(self, mock_factory, mock_consumer):
"""
Test consumer content installation.
"""
mock_factory.return_value.install_content.return_value.task_id = '1234'
mock_consumer.return_value.get_consumer.return_value = 'my_consumer'
request = mock.MagicMock()
request.body = json.dumps({"units": [], "options": {}})
consumer_content = ConsumerContentActionView()
self.assertRaises(OperationPostponed, consumer_content.post, request,
'my-consumer', 'install')
mock_factory.return_value.install_content.assert_called_once_with(
'my-consumer', [], {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_agent_manager')
def test_consumer_content_update(self, mock_factory, mock_consumer):
"""
Test consumer content update.
"""
mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
mock_factory.return_value.update_content.return_value.task_id = '1234'
request = mock.MagicMock()
request.body = json.dumps({"units": [], "options": {}})
consumer_content = ConsumerContentActionView()
self.assertRaises(OperationPostponed, consumer_content.post, request,
'my-consumer', 'update')
mock_factory.return_value.update_content.assert_called_once_with(
'my-consumer', [], {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_agent_manager')
def test_consumer_content_uninstall(self, mock_factory, mock_consumer):
"""
Test consumer content uninstall.
"""
mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
mock_factory.return_value.uninstall_content.return_value.task_id = '1234'
request = mock.MagicMock()
request.body = json.dumps({"units": [], "options": {}})
consumer_content = ConsumerContentActionView()
self.assertRaises(OperationPostponed, consumer_content.post, request,
'my-consumer', 'uninstall')
mock_factory.return_value.uninstall_content.assert_called_once_with(
'my-consumer', [], {})
class TestConsumerHistoryView(unittest.TestCase):
"""
Test Consumer history view
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_history_manager')
def test_consumer_history(self, mock_history, mock_consumer, mock_resp):
"""
Test consumer history
"""
mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
mock_history.return_value.query.return_value = {'mock': 'some-history'}
request = mock.MagicMock()
consumer_history = ConsumerHistoryView()
request.GET = {}
response = consumer_history.get(request, 'test-consumer')
mock_history.return_value.query.assert_called_once_with(sort='descending', event_type=None,
end_date=None, start_date=None,
consumer_id='test-consumer',
limit=None)
mock_resp.assert_called_once_with({'mock': 'some-history'})
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_history_manager')
def test_consumer_history_with_filters(self, mock_history, mock_consumer, mock_resp):
"""
Test consumer history using filters
"""
mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
mock_history.return_value.query.return_value = {'mock': 'some-history'}
request = mock.MagicMock()
consumer_history = ConsumerHistoryView()
request.GET = {'limit': '2', 'event_type': 'registered'}
response = consumer_history.get(request, 'test-consumer')
mock_history.return_value.query.assert_called_once_with(sort='descending', limit=2,
event_type='registered',
end_date=None, start_date=None,
consumer_id='test-consumer')
mock_resp.assert_called_once_with({'mock': 'some-history'})
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_history_manager')
def test_consumer_no_history(self, mock_history, mock_consumer, mock_resp):
"""
Test consumer no history
"""
mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
mock_history.return_value.query.return_value = []
request = mock.MagicMock()
consumer_history = ConsumerHistoryView()
request.GET = {}
response = consumer_history.get(request, 'test-consumer')
mock_history.return_value.query.assert_called_once_with(sort='descending', limit=None,
event_type=None,
end_date=None, start_date=None,
consumer_id='test-consumer')
mock_resp.assert_called_once_with([])
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_consumer_history_with_nonint_limit(self, mock_consumer):
"""
Pass an invalid (non-integer) limit parameter.
"""
mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
mock_request = mock.MagicMock()
mock_request.GET = {'limit': 'not an int'}
consumer_history = ConsumerHistoryView()
try:
consumer_history.get(mock_request, 'test-consumer')
except InvalidValue, response:
pass
else:
raise AssertionError('InvalidValue should be raised if limit is not an integer')
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['limit'])
class TestConsumerProfilesView(unittest.TestCase):
"""
Represents consumers profiles
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_profile_manager')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_get_consumer_profiles(self, mock_consumer, mock_profile, mock_resp):
"""
Test retrieve consumer profiles
"""
mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
resp = [{'some_profile': [], 'consumer_id': 'test-consumer', 'content_type': 'rpm'}]
mock_profile.return_value.get_profiles.return_value = resp
request = mock.MagicMock()
consumer_profiles = ConsumerProfilesView()
response = consumer_profiles.get(request, 'test-consumer')
expected_cont = [{'consumer_id': 'test-consumer', 'some_profile': [],
'_href': '/v2/consumers/test-consumer/profiles/rpm/',
'content_type': 'rpm'}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.generate_redirect_response')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_profile_manager')
def test_create_consumer_profile(self, mock_profile, mock_resp, mock_redirect):
"""
Test create consumer profile
"""
resp = {'some_profile': [], 'consumer_id': 'test-consumer', 'content_type': 'rpm'}
mock_profile.return_value.create.return_value = resp
request = mock.MagicMock()
request.body = json.dumps({'content_type': 'rpm', 'profile': []})
consumer_profiles = ConsumerProfilesView()
response = consumer_profiles.post(request, 'test-consumer')
expected_cont = {'consumer_id': 'test-consumer', 'some_profile': [],
'_href': '/v2/consumers/test-consumer/profiles/rpm/',
'content_type': 'rpm'}
mock_resp.assert_called_once_with(expected_cont)
mock_redirect.assert_called_once_with(mock_resp.return_value, expected_cont['_href'])
self.assertTrue(response is mock_redirect.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_profile_manager')
def test_create_consumer_profile_missing_param(self, mock_profile):
"""
Test create consumer profile with missing param
"""
resp = {'some_profile': [], 'consumer_id': 'test-consumer', 'content_type': 'rpm'}
mock_profile.return_value.create.return_value = resp
request = mock.MagicMock()
request.body = json.dumps({'profile': []})
consumer_profiles = ConsumerProfilesView()
try:
response = consumer_profiles.post(request, 'test-consumer')
except MissingValue, response:
pass
else:
raise AssertionError("MissingValue should be raised with missing param")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['content_type'])
class TestConsumerProfileSearchView(unittest.TestCase):
"""
Test the ConsumerProfileSearchView.
"""
def test_class_attributes(self):
"""
Ensure that the ConsumerProfileSearchView has the correct class attributes.
"""
self.assertEqual(ConsumerProfileSearchView.response_builder,
util.generate_json_response_with_pulp_encoder)
self.assertTrue(isinstance(ConsumerProfileSearchView.manager, profile.ProfileManager))
class TestConsumerProfileResourceView(unittest.TestCase):
"""
    Represents a consumer profile resource
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_profile_manager')
def test_get_consumer_profile(self, mock_profile, mock_resp):
"""
Test retrieve consumer profile
"""
resp = {'some_profile': [], 'consumer_id': 'test-consumer', 'content_type': 'rpm'}
mock_profile.return_value.get_profile.return_value = resp
request = mock.MagicMock()
consumer_profile = ConsumerProfileResourceView()
response = consumer_profile.get(request, 'test-consumer', 'rpm')
expected_cont = {'consumer_id': 'test-consumer', 'some_profile': [],
'_href': '/v2/consumers/test-consumer/profiles/rpm/',
'content_type': 'rpm'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_UPDATE())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_profile_manager')
def test_update_consumer_profile(self, mock_profile, mock_resp):
"""
Test update consumer profile
"""
resp = {'some_profile': ['new_info'], 'consumer_id': 'test-consumer', 'content_type': 'rpm'}
mock_profile.return_value.update.return_value = resp
request = mock.MagicMock()
request.body = json.dumps({'some_profile': ['new_info']})
consumer_profile = ConsumerProfileResourceView()
response = consumer_profile.put(request, 'test-consumer', 'rpm')
expected_cont = {'consumer_id': 'test-consumer', 'some_profile': ['new_info'],
'_href': '/v2/consumers/test-consumer/profiles/rpm/',
'content_type': 'rpm'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_profile_manager')
def test_delete_consumer_profile(self, mock_profile, mock_resp):
"""
Test delete consumer profile
"""
mock_profile.return_value.delete.return_value = None
request = mock.MagicMock()
consumer_profile = ConsumerProfileResourceView()
response = consumer_profile.delete(request, 'test-consumer', 'rpm')
mock_profile.return_value.delete.assert_called_once_with('test-consumer', 'rpm')
mock_resp.assert_called_once_with(None)
self.assertTrue(response is mock_resp.return_value)
class TestConsumerQueryContentApplicabilityView(unittest.TestCase):
"""
    Represents consumer content applicability
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.ConsumerContentApplicabilityView')
def test_query_consumer_content_applic_bad_request(self, mock_criteria_types):
"""
Test query consumer content applic. bad request
"""
mock_criteria_types._get_consumer_criteria.side_effect = InvalidValue
request = mock.MagicMock()
request.body = json.dumps({'content_types': ['type1']})
consumer_applic = ConsumerContentApplicabilityView()
response = consumer_applic.post(request)
self.assertTrue(isinstance(response, HttpResponseBadRequest))
self.assertEqual(response.status_code, 400)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.retrieve_consumer_applicability')
@mock.patch('pulp.server.webservices.views.consumers.ConsumerContentApplicabilityView')
def test_query_consumer_content_applic(self, mock_criteria_types, mock_applic, mock_resp):
"""
Test query consumer content applicability
"""
resp = [{'consumers': ['c1', 'c2'],
'applicability': {'content_type_1': ['unit_1', 'unit_3']}}]
mock_criteria_types._get_consumer_criteria.return_value = {'mock': 'some-criteria'}
mock_criteria_types._get_content_types.return_value = {'mock': 'some-content-types'}
mock_applic.return_value = resp
request = mock.MagicMock()
request.body = json.dumps({'criteria': {'filters': {}}, 'content_types': ['type1']})
consumer_applic = ConsumerContentApplicabilityView()
response = consumer_applic.post(request)
mock_resp.assert_called_once_with(resp)
self.assertTrue(response is mock_resp.return_value)
def test_get_consumer_criteria_no_criteria(self):
"""
Test get consumer criteria.
"""
request = mock.MagicMock()
request.body_as_json = {}
consumer_applic = ConsumerContentApplicabilityView()
try:
response = ConsumerContentApplicabilityView._get_consumer_criteria(
consumer_applic, request)
except InvalidValue, response:
pass
else:
raise AssertionError("InvalidValue should be raised with missing param")
self.assertEqual(response.http_status_code, 400)
m = "The input to this method must be a JSON object with a 'criteria' key."
self.assertEqual(response.error_data['property_names'], [m])
def test_get_consumer_criteria_no_content_types(self):
"""
Test get content types
"""
request = mock.MagicMock()
request.body_as_json = {'content_types': 'not_list'}
consumer_applic = ConsumerContentApplicabilityView()
try:
response = ConsumerContentApplicabilityView._get_content_types(
consumer_applic, request)
except InvalidValue, response:
pass
else:
raise AssertionError("InvalidValue should be raised with missing param")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'],
['content_types must index an array.'])
class TestConsumerContentApplicabilityView(unittest.TestCase):
"""
    Represents consumer content applicability regeneration
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
def test_post_consumer_content_applic_regen_no_criteria(self):
"""
Test create consumer content applic. regen with no criteria
"""
request = mock.MagicMock()
request.body = json.dumps({})
consumer_applic_regen = ConsumerContentApplicRegenerationView()
try:
response = consumer_applic_regen.post(request)
except MissingValue, response:
pass
else:
raise AssertionError("MissingValue should be raised with missing param")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['consumer_criteria'])
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
def test_post_consumer_content_applic_regen_invalid_criteria(self):
"""
Test create consumer content applic. regen with invalid criteria
"""
request = mock.MagicMock()
request.body = json.dumps({'consumer_criteria': []})
consumer_applic_regen = ConsumerContentApplicRegenerationView()
try:
response = consumer_applic_regen.post(request)
except InvalidValue, response:
pass
else:
raise AssertionError("InvalidValue should be raised with missing param")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['consumer_criteria'])
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.tags')
@mock.patch('pulp.server.webservices.views.consumers.Criteria.from_client_input')
@mock.patch('pulp.server.webservices.views.consumers.regenerate_applicability_for_consumers')
def test_post_consumer_content_applic_regen(self, mock_applic, mock_criteria, mock_tags):
"""
Test create consumer content applic. regen
"""
mock_task_tags = [mock_tags.action_tag.return_value]
mock_criteria.return_value.as_dict.return_value = {'mock': 'some-criteria'}
request = mock.MagicMock()
request.body = json.dumps({'consumer_criteria': {}})
consumer_applic_regen = ConsumerContentApplicRegenerationView()
try:
consumer_applic_regen.post(request)
except OperationPostponed, response:
pass
else:
            raise AssertionError('OperationPostponed should be raised for asynchronous regeneration.')
self.assertEqual(response.http_status_code, 202)
mock_applic.apply_async_with_reservation.assert_called_with(
mock_tags.RESOURCE_REPOSITORY_PROFILE_APPLICABILITY_TYPE, mock_tags.RESOURCE_ANY_ID,
({'mock': 'some-criteria'},), tags=mock_task_tags)
class TestConsumerResourceContentApplicabilityView(unittest.TestCase):
"""
Represents consumer content applicability regeneration
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_query_manager')
def test_post_consumer_resource_content_applic_regen_no_consumer(self, mock_consumer):
"""
Test create consumer content applic. regen with invalid consumer
"""
mock_consumer.return_value.find_by_id.return_value = None
request = mock.MagicMock()
request.body = json.dumps({})
consumer_applic_regen = ConsumerResourceContentApplicRegenerationView()
try:
response = consumer_applic_regen.post(request, 'c1')
except MissingResource, response:
pass
else:
raise AssertionError("MissingResource should be raised with missing param")
self.assertEqual(response.http_status_code, 404)
self.assertEqual(response.error_data['resources'], {'consumer_id': 'c1'})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.tags')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_query_manager')
@mock.patch('pulp.server.webservices.views.consumers.Criteria')
@mock.patch('pulp.server.webservices.views.consumers.regenerate_applicability_for_consumers')
def test_post_consumer_resource_content_applic_regen(self, mock_applic, mock_criteria,
mock_consumer, mock_tags):
"""
Test create consumer resource content applic. regen
"""
mock_consumer.return_value.find_by_id.return_value = 'c1'
mock_task_tags = [mock_tags.action_tag.return_value]
mock_criteria.return_value.as_dict.return_value = {'mock': 'some-criteria'}
request = mock.MagicMock()
request.body = json.dumps({})
consumer_applic_regen = ConsumerResourceContentApplicRegenerationView()
try:
consumer_applic_regen.post(request, 'c1')
except OperationPostponed, response:
pass
else:
            raise AssertionError('OperationPostponed should be raised for asynchronous regeneration.')
self.assertEqual(response.http_status_code, 202)
mock_applic.apply_async_with_reservation.assert_called_with(
mock_tags.RESOURCE_CONSUMER_TYPE, 'c1',
({'mock': 'some-criteria'},), tags=mock_task_tags)
class TestConsumerUnitActionSchedulesView(unittest.TestCase):
"""
Test consumer schedule actions
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_schedules(self, mock_factory, mock_resp):
"""
Test consumer's schedules retrieval
"""
mock_consumer_manager = mock.MagicMock()
mock_factory.consumer_manager.return_value = mock_consumer_manager
mock_consumer_manager.get_consumer.return_value = 'c1'
mock_display = mock.MagicMock()
resp = {'_id': 'my-schedule', 'schedule': 'P1D', 'kwargs': {'options': {}, 'units': []}}
mock_display.for_display.return_value = resp
mock_factory.consumer_schedule_manager.return_value.get.return_value = [mock_display]
request = mock.MagicMock()
consumer_schedule = UnitInstallSchedulesView()
response = consumer_schedule.get(request, 'c1')
mock_factory.consumer_schedule_manager.return_value.get.assert_called_once_with(
'c1', 'scheduled_unit_install')
expected_content = [{'_id': 'my-schedule', 'kwargs': {'options': {}, 'units': []},
'_href': '/v2/consumers/c1/schedules/content/install/my-schedule/',
'options': {}, 'units': [], 'schedule': 'P1D'}]
mock_resp.assert_called_once_with(expected_content)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_schedules_missing_consumer(self, mock_factory):
"""
Test consumer's schedules retrieval missing consumer
"""
mock_consumer_manager = mock.MagicMock()
mock_factory.consumer_manager.return_value = mock_consumer_manager
mock_consumer_manager.get_consumer.side_effect = MissingResource()
request = mock.MagicMock()
consumer_schedule = UnitInstallSchedulesView()
try:
response = consumer_schedule.get(request, 'test-consumer')
except MissingResource, response:
pass
else:
raise AssertionError("MissingResource should be raised with missing consumer")
self.assertEqual(response.http_status_code, 404)
self.assertEqual(response.error_data['resources'], {'consumer_id': 'test-consumer'})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.generate_redirect_response')
@mock.patch('pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_create_schedules(self, mock_factory, mock_resp, mock_redirect):
"""
Test consumer's schedules creation
"""
mock_consumer_manager = mock.MagicMock()
mock_factory.consumer_manager.return_value = mock_consumer_manager
mock_consumer_manager.get_consumer.return_value = 'c1'
mock_consumer_schedule_manager = mock.MagicMock()
mock_factory.consumer_schedule_manager.return_value = mock_consumer_schedule_manager
resp = {'_id': 'some-schedule', 'kwargs': {'options': {}, 'units': []}}
mock_consumer_schedule_manager.create_schedule.return_value.for_display.return_value = resp
request = mock.MagicMock()
request.body = json.dumps({'schedule': 'some-schedule'})
consumer_schedule = UnitInstallSchedulesView()
response = consumer_schedule.post(request, 'c1')
mock_consumer_schedule_manager.create_schedule.assert_called_once_with(
'scheduled_unit_install', 'c1', None, {}, 'some-schedule', None, True)
expected_cont = {'_id': 'some-schedule', 'kwargs': {'options': {}, 'units': []},
'options': {}, 'units': [],
'_href': '/v2/consumers/c1/schedules/content/install/some-schedule/'}
mock_resp.assert_called_once_with(expected_cont)
mock_redirect.assert_called_once_with(mock_resp.return_value, expected_cont['_href'])
self.assertTrue(response is mock_redirect.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_schedule_manager')
def test_create_schedules_unsupported_params(self, mock_consumer):
"""
Test consumer's schedules creation with unsupported param
"""
request = mock.MagicMock()
request.body = json.dumps({'schedule': 'some-schedule', 'unsupported_param': '1234'})
consumer_schedule = UnitInstallSchedulesView()
try:
response = consumer_schedule.post(request, 'test-consumer')
except UnsupportedValue, response:
pass
else:
raise AssertionError("UnsupportedValue should be raised with unsupported keys")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['unsupported_param'])
class TestConsumerUnitActionScheduleResourceView(unittest.TestCase):
"""
Test consumer schedule actions
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_schedule(self, mock_factory, mock_resp):
"""
Test consumer's schedules resource retrieval
"""
mock_id = mock.MagicMock()
resp = {'_id': 'some-schedule', 'schedule': 'P1D', 'kwargs': {'options': {}, 'units': []}}
mock_id.for_display.return_value = resp
mock_id.id = 'some-schedule'
mock_factory.consumer_schedule_manager.return_value.get.return_value = [mock_id]
request = mock.MagicMock()
consumer_schedule = UnitInstallScheduleResourceView()
response = consumer_schedule.get(request, 'c1', 'some-schedule')
mock_factory.consumer_schedule_manager.return_value.get.assert_called_once_with(
'c1', 'scheduled_unit_install')
expected_cont = {'_id': 'some-schedule', 'kwargs': {'options': {}, 'units': []},
'_href': '/v2/consumers/c1/schedules/content/install/some-schedule/',
'options': {}, 'units': [], 'schedule': 'P1D'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_invalid_schedule(self, mock_factory, mock_resp):
"""
Test consumer's invalid schedule resource retrieval
"""
mock_factory.consumer_schedule_manager.return_value.get.return_value = []
request = mock.MagicMock()
consumer_schedule = UnitInstallScheduleResourceView()
try:
response = consumer_schedule.get(request, 'test-consumer', 'some-schedule')
except MissingResource, response:
pass
else:
raise AssertionError("MissingResource should be raised with missing param")
self.assertEqual(response.http_status_code, 404)
self.assertEqual(response.error_data['resources'], {'consumer_id': 'test-consumer',
'schedule_id': 'some-schedule'})
mock_factory.consumer_schedule_manager.return_value.get.assert_called_once_with(
'test-consumer', 'scheduled_unit_install')
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_UPDATE())
@mock.patch('pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_schedule_manager')
def test_update_schedule(self, mock_factory, mock_resp):
"""
Test consumer's schedules resource update
"""
resp = {'_id': 'some-schedule', 'schedule': 'P1D', 'kwargs': {'options': {}, 'units': []}}
mock_update_schedule = mock.MagicMock()
mock_factory.return_value.update_schedule = mock_update_schedule
mock_update_schedule.return_value.for_display.return_value = resp
request = mock.MagicMock()
request.body = json.dumps({'failure_threshold': '3', 'schedule': 'P1D'})
consumer_schedule = UnitInstallScheduleResourceView()
response = consumer_schedule.put(request, 'c1', 'some-schedule')
mock_update_schedule.assert_called_once_with('c1', 'some-schedule', None, None,
{'failure_threshold': '3',
'iso_schedule': 'P1D'})
expected_cont = {'_id': 'some-schedule', 'kwargs': {'options': {}, 'units': []},
'_href': '/v2/consumers/c1/schedules/content/install/some-schedule/',
'options': {}, 'units': [], 'schedule': 'P1D'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
@mock.patch('pulp.server.webservices.views.consumers.generate_json_response')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_schedule_manager')
def test_delete_schedule(self, mock_schedule, mock_resp):
"""
Test consumer's schedules resource delete
"""
request = mock.MagicMock()
consumer_schedule = UnitInstallScheduleResourceView()
response = consumer_schedule.delete(request, 'test-consumer', 'some-schedule')
mock_schedule.return_value.delete_schedule.assert_called_once_with(
'test-consumer', 'some-schedule')
mock_resp.assert_called_once_with(None)
self.assertTrue(response is mock_resp.return_value)
class TestConsumerAddLinks(unittest.TestCase):
def test_add_link(self):
"""
Test that the reverse for consumer works correctly.
"""
consumer = {'id': 'my_consumer'}
link = consumers.add_link(consumer)
href = {'_href': '/v2/consumers/my_consumer/'}
expected_cont = {'id': 'my_consumer', '_href': '/v2/consumers/my_consumer/'}
self.assertEqual(link, href)
self.assertEqual(consumer, expected_cont)
def test_add_link_profile(self):
"""
Test that the reverse for consumer profile works correctly.
"""
consumer_profile = {'consumer_id': 'my_consumer', 'content_type': 'rpm'}
link = consumers.add_link_profile(consumer_profile)
href = {'_href': '/v2/consumers/my_consumer/profiles/rpm/'}
expected_cont = {'consumer_id': 'my_consumer', 'content_type': 'rpm',
'_href': '/v2/consumers/my_consumer/profiles/rpm/'}
self.assertEqual(link, href)
self.assertEqual(consumer_profile, expected_cont)
def test_add_link_schedule(self):
"""
Test that the reverse for consumer schedule works correctly.
"""
consumer_id = 'c1'
action_type = 'scheduled_unit_install'
schedule = {'_id': 'schedule-id'}
link = consumers.add_link_schedule(schedule, action_type, consumer_id)
href = {'_href': '/v2/consumers/c1/schedules/content/install/schedule-id/'}
expected_cont = {'_id': 'schedule-id',
'_href': '/v2/consumers/c1/schedules/content/install/schedule-id/'}
self.assertEqual(link, href)
self.assertEqual(schedule, expected_cont)
def test_scheduled_unit_management_obj_structure(self):
"""
Modify scheduled unit management object.
"""
scheduled_call = {'kwargs': {'options': {}, 'units': []}}
expected_structure = {'kwargs': {'options': {}, 'units': []}, 'options': {}, 'units': []}
response = consumers.scheduled_unit_management_obj(scheduled_call)
self.assertEqual(response, expected_structure)
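# Editor's note: a hedged sketch, not part of the original suite. It shows how the
# try/except/else idiom used throughout these tests can also be written with
# assertRaises as a context manager (Python 2.7+), reusing the same imports
# (mock, unittest, InvalidValue, ConsumerHistoryView, assert_auth_READ) already
# present in this module.
class ExampleAssertRaisesPattern(unittest.TestCase):
    @mock.patch('pulp.server.webservices.views.decorators._verify_auth',
                new=assert_auth_READ())
    @mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
    def test_nonint_limit_raises(self, mock_consumer):
        """
        Mirror of test_consumer_history_with_nonint_limit using assertRaises.
        """
        mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
        mock_request = mock.MagicMock()
        mock_request.GET = {'limit': 'not an int'}
        with self.assertRaises(InvalidValue) as assertion:
            ConsumerHistoryView().get(mock_request, 'test-consumer')
        self.assertEqual(assertion.exception.http_status_code, 400)
        self.assertEqual(assertion.exception.error_data['property_names'], ['limit'])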
|
gpl-2.0
| -1,395,906,513,170,108,400
| 46.414029
| 100
| 0.639558
| false
| 3.978955
| true
| false
| false
|
pmghalvorsen/gramps_branch
|
gramps/gui/filters/sidebar/_sidebarfilter.py
|
1
|
9432
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from bisect import insort_left
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import Pango
from ... import widgets
from ...dbguielement import DbGUIElement
from gramps.gen.config import config
_RETURN = Gdk.keyval_from_name("Return")
_KP_ENTER = Gdk.keyval_from_name("KP_Enter")
class SidebarFilter(DbGUIElement):
_FILTER_WIDTH = 20
_FILTER_ELLIPSIZE = Pango.EllipsizeMode.END
def __init__(self, dbstate, uistate, namespace):
self.signal_map = {
'tag-add' : self._tag_add,
'tag-delete' : self._tag_delete,
'tag-update' : self._tag_update,
'tag-rebuild' : self._tag_rebuild
}
DbGUIElement.__init__(self, dbstate.db)
self.position = 1
self.vbox = Gtk.VBox()
self.table = Gtk.Table(n_rows=4, n_columns=11)
self.vbox.pack_start(self.table, False, False, 0)
self.table.set_border_width(6)
self.table.set_row_spacings(6)
self.table.set_col_spacing(0, 6)
self.table.set_col_spacing(1, 6)
self.apply_btn = Gtk.Button(stock=Gtk.STOCK_FIND)
self.clear_btn = Gtk.Button()
self._init_interface()
uistate.connect('filters-changed', self.on_filters_changed)
dbstate.connect('database-changed', self._db_changed)
self.uistate = uistate
self.dbstate = dbstate
self.namespace = namespace
self.__tag_list = []
self._tag_rebuild()
def _init_interface(self):
self.create_widget()
self.apply_btn.connect('clicked', self.clicked)
hbox = Gtk.HBox()
hbox.show()
image = Gtk.Image()
image.set_from_stock(Gtk.STOCK_UNDO, Gtk.IconSize.BUTTON)
image.show()
label = Gtk.Label(label=_('Reset'))
label.show()
hbox.pack_start(image, False, False, 0)
hbox.pack_start(label, False, True, 0)
hbox.set_spacing(4)
self.clear_btn.add(hbox)
self.clear_btn.connect('clicked', self.clear)
hbox = Gtk.HButtonBox()
hbox.set_layout(Gtk.ButtonBoxStyle.START)
hbox.set_spacing(6)
hbox.set_border_width(12)
hbox.add(self.apply_btn)
hbox.add(self.clear_btn)
hbox.show()
self.vbox.pack_start(hbox, False, False, 0)
self.vbox.show()
def get_widget(self):
return self.vbox
def create_widget(self):
pass
def clear(self, obj):
pass
def clicked(self, obj):
self.uistate.set_busy_cursor(True)
self.clicked_func()
self.uistate.set_busy_cursor(False)
def clicked_func(self):
pass
def get_filter(self):
pass
def add_regex_entry(self, widget):
hbox = Gtk.HBox()
hbox.pack_start(widget, False, False, 12)
self.vbox.pack_start(hbox, False, False, 0)
def add_text_entry(self, name, widget, tooltip=None):
self.add_entry(name, widget)
widget.connect('key-press-event', self.key_press)
if tooltip:
widget.set_tooltip_text(tooltip)
def key_press(self, obj, event):
if not (event.get_state() & Gdk.ModifierType.CONTROL_MASK):
if event.keyval in (_RETURN, _KP_ENTER):
self.clicked(obj)
return False
def add_entry(self, name, widget):
if name:
self.table.attach(widgets.BasicLabel(name),
1, 2, self.position, self.position+1,
xoptions=Gtk.AttachOptions.FILL, yoptions=0)
self.table.attach(widget, 2, 4, self.position, self.position+1,
xoptions=Gtk.AttachOptions.FILL|Gtk.AttachOptions.EXPAND, yoptions=0)
self.position += 1
def on_filters_changed(self, namespace):
"""
Called when filters are changed.
"""
pass
def _db_changed(self, db):
"""
Called when the database is changed.
"""
self._change_db(db)
self.on_db_changed(db)
self._tag_rebuild()
def on_db_changed(self, db):
"""
Called when the database is changed.
"""
pass
def _connect_db_signals(self):
"""
Connect database signals defined in the signal map.
"""
for sig in self.signal_map:
self.callman.add_db_signal(sig, self.signal_map[sig])
def _tag_add(self, handle_list):
"""
Called when tags are added.
"""
for handle in handle_list:
tag = self.dbstate.db.get_tag_from_handle(handle)
insort_left(self.__tag_list, (tag.get_name(), handle))
self.on_tags_changed([item[0] for item in self.__tag_list])
def _tag_update(self, handle_list):
"""
Called when tags are updated.
"""
for handle in handle_list:
item = [item for item in self.__tag_list if item[1] == handle][0]
self.__tag_list.remove(item)
tag = self.dbstate.db.get_tag_from_handle(handle)
insort_left(self.__tag_list, (tag.get_name(), handle))
self.on_tags_changed([item[0] for item in self.__tag_list])
def _tag_delete(self, handle_list):
"""
Called when tags are deleted.
"""
self.__tag_list = [item for item in self.__tag_list
if item[1] not in handle_list]
self.on_tags_changed([item[0] for item in self.__tag_list])
def _tag_rebuild(self):
"""
Called when the tag list needs to be rebuilt.
"""
self.__tag_list = []
for handle in self.dbstate.db.get_tag_handles(sort_handles=True):
tag = self.dbstate.db.get_tag_from_handle(handle)
self.__tag_list.append((tag.get_name(), handle))
self.on_tags_changed([item[0] for item in self.__tag_list])
def on_tags_changed(self, tag_list):
"""
Called when tags are changed.
"""
pass
def add_filter_entry(self, text, widget):
"""
Adds the text and widget to GUI, with an Edit button.
"""
hbox = Gtk.HBox()
hbox.pack_start(widget, True, True, 0)
hbox.pack_start(widgets.SimpleButton(Gtk.STOCK_EDIT, self.edit_filter),
False, False, 0)
self.add_entry(text, hbox)
def edit_filter(self, obj):
"""
        Callback which invokes the EditFilter dialog. Creates a new
        filter if none is selected.
"""
from ...editors import EditFilter
from gramps.gen.filters import FilterList, GenericFilterFactory
from gramps.gen.const import CUSTOM_FILTERS
the_filter = None
filterdb = FilterList(CUSTOM_FILTERS)
filterdb.load()
if self.generic.get_active() != 0:
model = self.generic.get_model()
node = self.generic.get_active_iter()
if node:
sel_filter = model.get_value(node, 1)
# the_filter needs to be a particular object for editor
for filt in filterdb.get_filters(self.namespace):
if filt.get_name() == sel_filter.get_name():
the_filter = filt
else:
the_filter = GenericFilterFactory(self.namespace)()
if the_filter:
EditFilter(self.namespace, self.dbstate, self.uistate, [],
the_filter, filterdb,
selection_callback=self.edit_filter_save)
def edit_filter_save(self, filterdb, filter_name):
"""
        If a filter changed, save them all, reload the custom filters, and set the name.
        Takes the filter database and the name of the edited filter.
"""
from gramps.gen.filters import reload_custom_filters
filterdb.save()
reload_custom_filters()
self.on_filters_changed(self.namespace)
self.set_filters_to_name(filter_name)
def set_filters_to_name(self, filter_name):
"""
Resets the Filter combobox to the edited/saved filter.
"""
liststore = self.generic.get_model()
iter = liststore.get_iter_first()
while iter:
filter = liststore.get_value(iter, 1)
if filter and filter.name == filter_name:
self.generic.set_active_iter(iter)
break
iter = liststore.iter_next(iter)
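# Editor's sketch (illustration only, not used by SidebarFilter): the tag handling in
# _tag_add/_tag_update/_tag_rebuild above relies on bisect.insort_left keeping the
# internal (name, handle) list sorted by tag name, so on_tags_changed always receives
# the names in alphabetical order. The tag names and handles below are hypothetical.
def _example_sorted_tag_list():
    from bisect import insort_left
    tag_list = []
    for name, handle in (('ToDo', 'handle-2'), ('Complete', 'handle-1')):
        insort_left(tag_list, (name, handle))
    # tag_list is now [('Complete', 'handle-1'), ('ToDo', 'handle-2')]
    return [item[0] for item in tag_list]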
|
gpl-2.0
| 512,867,680,985,231,900
| 33.423358
| 95
| 0.586514
| false
| 3.759267
| false
| false
| false
|
Bioto/Huuey-python
|
huuey/hue/lights/light.py
|
1
|
2617
|
from huuey.hue.state import State
from huuey.paths import Paths
class Light:
"""
Description:
        Holds data for a single Light from the Hue API
Attrs:
state: Holds instance of State()
name: Name of the group
modelid: Type of Light
swversion: Software Version
uniqueid: Machine id for light
_id: ID of light
_controller: Reference to main huuey object
_newname: Holds string for updating the name of the group
"""
state = None
name = None
modelid = None
swversion = None
uniqueid = None
_id = None
_controller = None
_newname = None
def __init__(self, obj, controller):
self._controller = controller
self._map(obj)
def _map(self, obj):
"""
Description:
Maps the passed in data to the current object
"""
for key in obj:
if key == "state":
self.state = State(obj[key], self)
else:
setattr(self, key, obj[key])
def getid(self):
return self._id
def delete(self):
"""
Description:
Deletes the Light from the bridge
"""
return self._controller.request(Paths.LightDEL, additional={
'<id>': self._id
})
def setstate(self, obj):
"""
Description:
Updates the state object to prepare for actual request
"""
if 'rename' in obj:
self._newname = obj['rename']
del obj['rename']
self.state.update(obj)
return self
def update(self):
"""
Description:
Sends request to endpoint then pulls updated data
directly from the API
If _newname is set it will send the request to
update the name first
then trigger main request
"""
if self._newname:
self._controller.request(Paths.LightPUT, {
'name': self._newname
}, additional={
'<id>': self._id
})
self._newname = None
self._controller.request(Paths.LightState, self.state.object(),
additional={
'<id>': self._id
})
self.grab()
def grab(self):
"""
Description:
Pulls fresh data from the API
"""
light = self._controller.request(Paths.LightGET, additional={
'<id>': self._id
})
self._map(light)
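# Editor's sketch (hypothetical light object and state keys, not part of the library):
# setstate() only stages changes on the State object (plus an optional rename) and
# returns self, so a typical call site chains it with update(), which pushes the
# staged state to the bridge and then refreshes the object via grab().
def _example_light_usage(light):
    light.setstate({'on': True, 'bri': 200}).update()   # stage state, push, refresh
    light.setstate({'rename': 'Kitchen'}).update()      # rename first, then update state
    return light.getid()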
|
mit
| -8,591,549,799,090,077,000
| 24.163462
| 71
| 0.504776
| false
| 4.689964
| false
| false
| false
|
eviljeff/olympia
|
src/olympia/files/tests/test_admin.py
|
1
|
3053
|
from django.utils.encoding import force_text
from olympia.amo.tests import TestCase, addon_factory, user_factory
from olympia.amo.urlresolvers import reverse
class TestFileAdmin(TestCase):
def setUp(self):
self.list_url = reverse('admin:files_file_changelist')
def test_can_list_files_with_admin_advanced_permission(self):
addon = addon_factory()
file_ = addon.current_version.all_files[0]
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Admin:Advanced')
self.client.login(email=user.email)
response = self.client.get(self.list_url, follow=True)
assert response.status_code == 200
assert str(file_.pk) in force_text(response.content)
def test_can_edit_with_admin_advanced_permission(self):
addon = addon_factory()
file_ = addon.current_version.all_files[0]
detail_url = reverse(
'admin:files_file_change', args=(file_.pk,)
)
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Admin:Advanced')
self.client.login(email=user.email)
response = self.client.get(detail_url, follow=True)
assert response.status_code == 200
assert str(file_.id) in force_text(response.content)
assert not file_.is_webextension
post_data = {
'version': file_.version.pk,
'platform': file_.platform,
'filename': file_.filename,
'size': file_.size,
'hash': 'xxx',
'original_hash': 'xxx',
'status': file_.status,
'original_status': file_.original_status,
}
post_data['is_webextension'] = 'on'
response = self.client.post(detail_url, post_data, follow=True)
assert response.status_code == 200
file_.refresh_from_db()
assert file_.is_webextension
def test_can_not_list_without_admin_advanced_permission(self):
user = user_factory(email='someone@mozilla.com')
self.client.login(email=user.email)
response = self.client.get(self.list_url, follow=True)
assert response.status_code == 403
# Just checking that simply changing the permission resolves
# as wanted
self.grant_permission(user, 'Admin:Advanced')
response = self.client.get(self.list_url, follow=True)
assert response.status_code == 200
def test_detail_view_has_download_link(self):
addon = addon_factory()
file_ = addon.current_version.all_files[0]
detail_url = reverse(
'admin:files_file_change', args=(file_.pk,)
)
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Admin:Advanced')
self.client.login(email=user.email)
response = self.client.get(detail_url, follow=True)
assert response.status_code == 200
expected_url = file_.get_absolute_url(attachment=True)
assert expected_url in force_text(response.content)
|
bsd-3-clause
| 1,612,477,954,414,876,000
| 38.649351
| 71
| 0.63151
| false
| 3.797264
| true
| false
| false
|
sergesyrota/Splunk-license-watchdog
|
splunk-license-watchdog.py
|
1
|
11265
|
#!/usr/bin/env python
##################
#
# DEPENDENCIES
#
# Python 2.6+
# Python packages: sys, getopt, requests, time
# Splunk: 4.2+
#
##################
from __future__ import print_function
#
# CONFIGURATION
#
# Authentication information for your Splunk setup
_splunkUser = "user"
_splunkPass = "pass"
# host and port for Splunk server that has license pool info
_licensingServer = "https://splunk.example.com:8089"
# List of inputs that can be disabled or enabled
# You can get a list by a helpful --discover-inputs=<host> flag
# Update inputList by creating a list with all inputs that should be toggled
# Note that you can include multiple hosts if you have multiple indexing heads in the same cluster
# Example: inputList = ['https://example.com:8089/servicesNS/nobody/launcher/data/inputs/tcp/cooked/9997',
# 'https://node2.example.com:8089/servicesNS/nobody/system/data/inputs/monitor/%24SPLUNK_HOME%252Fetc%252Fsplunk.version']
_inputList = []
# Action threshold. When current usage crosses _disableThreshold, listed inputs will be disabled.
# When today's usage is under _enableThreshold, we assume a new day has started, and inputs will be re-enabled.
# Consider that 1% is ~15 minutes. Set the thresholds and schedules accordingly.
# Also make sure the script runs before the time you might run out of quota.
_disableThreshold = 90
_enableThreshold = 30
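# Hedged worked example (hypothetical numbers, illustration only): with a 10 GiB/day
# license pool, a _disableThreshold of 90 trips once roughly 9 GiB have been indexed,
# and an _enableThreshold of 30 re-enables inputs once the new day's usage falls back
# under roughly 3 GiB. At ~15 minutes per 1%, scheduling the script every 10-15
# minutes keeps it ahead of the quota.
_example_quota_gib = 10.0
_example_disable_at_gib = _example_quota_gib * _disableThreshold / 100   # 9.0 GiB
_example_enable_below_gib = _example_quota_gib * _enableThreshold / 100  # 3.0 GiB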
#
# END CONFIGURATION
#
# If you change anything below, make sure you know what you're doing :)
#
# Default debug level
# 0 = Fatal errors (stderr) and action information (-q)
# 1 = Informational messages on steps and statuses
# 2 = Verbose output, with splunk responses (-v)
_debugLevel = 1
licensePoolQuery = '| rest /services/licenser/pools | rename title AS Pool | search [rest /services/licenser/groups | search is_active=1 | eval stack_id=stack_ids | fields stack_id] | eval quota=if(isnull(effective_quota),quota,effective_quota) | eval "Used"=round(used_bytes/1024/1024/1024, 3) | eval "Quota"=round(quota/1024/1024/1024, 3) | fields Pool "Used" "Quota"'
import sys
import getopt
import time
import requests
# Suppressing "InsecureRequestWarning" due to self-signed certificate on Splunk servers
requests.packages.urllib3.disable_warnings()
def main(argv):
# at a minimum, auth token should be set, so let's check it right away
if _splunkUser == "user" and _splunkPass == "pass":
debugPrint("Please update user and password to access your Splunk instance and run this script", 0)
showHelp()
sys.exit(1)
try:
opts, args = getopt.getopt(argv, "hc:d:vqED", ["help", "check-license=", "discover-inputs=", "enable-all", "disable-all"])
except getopt.GetoptError:
showHelp()
sys.exit(2)
# First go through non-action arguments and adjust environment variables, before performing actions that will lead to exit.
global _debugLevel
for opt, arg in opts:
if opt in ('-v'):
_debugLevel = 2
if opt in ('-q'):
_debugLevel = 0
# Separate loop for actions that result in exit
for opt, arg in opts:
if opt in ("-h", "--help"):
showHelp()
sys.exit(0)
if opt in ("-c", "--check-license"):
checkLicense(arg)
sys.exit(0)
if opt in ("-d", "--discover-inputs"):
discoverInputs(arg)
sys.exit(0)
if opt in ("-E", "--enable-all"):
enableInputs()
sys.exit(0)
if opt in ("-D", "--disable-all"):
disableInputs()
sys.exit(0)
# Validate that we have needed configuration
if len(_inputList) == 0:
exit("Please adjust the script with your configuration first. Input list is missing.")
# High level sequence:
# Get license details
# If we're over our license quota - enable all inputs. Might as well catch up today, since we'll have a warning anyways.
# If usage is under "enable" threshold: enable all disabled inputs
# If usage is over "disable" threshold: disable all enabled inputs
usage = getLicenseData(_licensingServer)
debugPrint("Quota: %0.3f; Used: %0.3f (%0.1f%%)" % (usage['Quota'], usage['Used'], usage['PercentUsed']), 1)
if usage['PercentUsed'] > 100:
debugPrint("We're over the quota for today! Enabling all disabled inputs to catch up as much as we can:", 1)
enableInputs()
elif usage['PercentUsed'] < _enableThreshold:
debugPrint("Usage is under threshold; Enabling all disabled inputs:", 1)
enableInputs()
elif usage['PercentUsed'] > _disableThreshold:
debugPrint("Usage is over threshold; Disabling all enabled inputs:", 1)
disableInputs()
sys.exit(0)
def disableInputs():
toggleInputs(False)
def enableInputs():
toggleInputs(True)
def toggleInputs(enable):
# Set variables so that we can use unified piece of code to toggle inputs
if enable:
commandSuffix = '/enable'
messageText = 'enabled'
disabledFlag = False
else:
commandSuffix = '/disable'
messageText = 'disabled'
disabledFlag = True
# Take care of all inputs, and make sure they are not in desired state before requesting a change (and also checking that inputs actually exist)
try:
for inputUrl in _inputList:
inputData = splunkRestRequest(inputUrl + '?output_mode=json')
if inputData['entry'][0]['content']['disabled'] == disabledFlag:
debugPrint("Already %s: %s" % (messageText, inputUrl), 2)
else:
# Changing status requires POST
r = splunkRestRequest(inputUrl + commandSuffix, {'output_mode': 'json'})
# Messages = possible problems. Need to verify
for message in r['messages']:
if message['type'] == 'ERROR':
exit("Error toggling input state: " + message['text'])
# Verify that status is correct now:
if r['entry'][0]['content']['disabled'] != disabledFlag:
exit("Error toggling input: %s; Request OK, but input not %s." % (inputUrl, messageText))
debugPrint("%s: %s" % (messageText, inputUrl), 1)
except IndexError as e:
exit("ERROR wotking with Splunk input toggles; unexpected data: %s" % str(e))
except KeyError as e:
exit("ERROR wotking with Splunk input toggles; unexpected data; key %s does not exist " % str(e))
# Helper function to use during setup. Just displays aggregated license quota and today's usage
def checkLicense(host):
debugPrint("Checking license info on " + host, 0)
data = getLicenseData(host)
debugPrint("Licensing quota: %0.3f GiB" % data['Quota'], 0)
debugPrint("Used today: %0.3f GiB (%0.1f%%)" % (data['Used'], data['PercentUsed']), 0)
# Helper function to use during setup. Just shows all inputs found on Splunk host (indexing head)
def discoverInputs(host):
debugPrint("Discovering inputs at " + host, 0)
data = splunkRestRequest(host + '/servicesNS/' + _splunkUser + '/launcher/data/inputs/all?output_mode=json')
for entry in data['entry']:
# entry will have links. We're interested in seeing ones we can disable and enable, so those are the links we're checking to validate (and skipping the rest)
# then grab entry link itself from "alternate" (so that we can add /disable or /enable later)
if 'enable' in entry['links'] or 'disable' in entry['links']:
status = "Unknown: "
if entry['content']['disabled']:
status = "Disabled: "
else:
status = "Enabled: "
debugPrint(status + host + entry['links']['alternate'], 0)
debugPrint("""
    Review the links above. Identify which ones you want to disable when you are approaching
    the license limit, then update the top of the file by copying them in there.
    Generally, you don't want to disable any internal indexing. You also need to consider whether
    data loss is something you can tolerate or want to achieve (e.g. disabling a file input past its
    rotation schedule will lead to loss of data between disabling and enabling). If you're
    using Splunk forwarders, though, they have their own cache, so disabling the tcp input they
    pipe to should be safe.""", 0)
# Runs Splunk query to get license pool information, and aggregate results, presenting only usage/quota information
def getLicenseData(host):
data = splunkQuery(host, licensePoolQuery)
try:
used = float(data['result']['Used'])
quota = float(data['result']['Quota'])
if used < 0 or quota <= 0:
exit("Error getting license data. Invalid response received: %s" % data)
return {'Quota': quota, 'Used': used, 'PercentUsed': 100*used/quota}
except KeyError:
exit("Error getting license data. Invalid response received: %s" % data)
# Generic function to run splunk query on a given node, and parse our JSON response
def splunkQuery(host, query):
debugPrint("Running Splunk query: '%s' on host '%s'" % (query, host), 2)
payload = {'search': query, 'output_mode': 'json', 'exec_mode': 'oneshot'}
return splunkRestRequest(host + '/servicesNS/' + _splunkUser + '/search/search/jobs/export/', payload)
# Data format is always expected to be JSON, so need to make sure it's either in URL explicitly, or in post data when this function is used
def splunkRestRequest(url, postData=None):
try:
# No post means we're making a GET request
if postData is None:
r = requests.get(url, auth=(_splunkUser, _splunkPass), verify=False)
debugPrint(r.text, 2)
return r.json()
else:
r = requests.post(url, postData, auth=(_splunkUser, _splunkPass), verify=False)
debugPrint(r.text, 2)
return r.json()
except requests.exceptions.RequestException as e:
exit("ERROR communicating with Splunk server (%s): %s", (url, str(e)))
def showHelp():
print("""
    USAGE: splunk-license-watchdog.py [options...]
    Running without arguments executes the main logic. Helper commands can help with configuration,
    but require the authentication variables to be set in the file.
    -c/--check-license <url>    Attempts to retrieve license information from the provided
                                Splunk node (requires auth info). protocol://host:port required, e.g.:
                                https://your.server.com:8089
    -d/--discover-inputs <url>  Discovers all inputs and their current states
                                on the provided Splunk node (requires auth parameters to be configured).
                                protocol://host:port required, e.g.:
                                https://your.server.com:8089
    -D/--disable-all            Disable all inputs that have been configured
    -E/--enable-all             Enable all inputs that have been configured
    -h/--help                   This help text
    -q                          Quiet mode (errors only)
    -v                          Verbose output (including Splunk queries)
    """)
def debugPrint(message, level):
if _debugLevel >= level:
print("%s - %s" % (time.strftime("%Y-%m-%d %H:%M:%S"), message))
def exit(message, retval=1):
print(message, file=sys.stderr)
sys.exit(retval)
main(sys.argv[1:])
|
mit
| -5,825,861,814,609,172,000
| 44.06
| 370
| 0.653262
| false
| 3.908744
| true
| false
| false
|
calico/basenji
|
bin/basenji_fetch_app2.py
|
1
|
10874
|
#!/usr/bin/env python
from optparse import OptionParser
import collections
import functools
import os
import pdb
import sys
import numpy as np
import pandas as pd
import h5py
from google.cloud import bigquery
import dash
import dash_table as dt
import dash.dependencies as dd
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from basenji.sad5 import ChrSAD5
'''
basenji_fetch_app.py
Run a Dash app to enable SAD queries.
'''
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <sad_hdf5_path>'
parser = OptionParser(usage)
parser.add_option('-c', dest='chrom_hdf5',
default=False, action='store_true',
help='HDF5 files split by chromosome [Default: %default]')
(options,args) = parser.parse_args()
if len(args) != 1:
parser.error('Must provide SAD HDF5')
else:
sad_h5_path = args[0]
#############################################
# precursors
print('Preparing data...', end='', flush=True)
sad5 = ChrSAD5(sad_h5_path, index_chr=True)
print('DONE.', flush=True)
#############################################
# layout
column_widths = [('SNP',150), ('Association',125),
('Score',125), ('ScoreQ',125), ('R',125),
('Experiment',125), ('Description',200)]
scc = [{'if': {'column_id': cw[0]}, 'width':cw[1]} for cw in column_widths]
app = dash.Dash(__name__)
app.css.append_css({"external_url": "https://codepen.io/chriddyp/pen/bWLwgP.css"})
app.layout = html.Div([
html.Div([
html.H1('Basenji SNP activity difference'),
dcc.Markdown('Instructions...'),
html.Div([
html.Label('Datasets'),
dcc.Dropdown(
id='dataset',
options=[
{'label':'CAGE', 'value':'CAGE'},
{'label':'DNase', 'value':'DNASE'},
{'label':'H3K4me3', 'value':'CHIP:H3K4me3'},
{'label':'All', 'value':'All'}
],
value='CAGE'
)
], style={'width': '250', 'display': 'inline-block'}),
html.Div([
html.Label('Population'),
dcc.Dropdown(
id='population',
options=[
{'label':'-', 'value':'-'},
{'label':'1kG African', 'value':'AFR'},
{'label':'1kG American', 'value':'AMR'},
{'label':'1kG East Asian', 'value':'EAS'},
{'label':'1kG European', 'value':'EUR'},
{'label':'1kG South Asian', 'value':'SAS'}
],
value='-'
)
], style={'width': '250', 'display': 'inline-block'}),
html.Div([
html.Label('SNP ID'),
dcc.Input(id='snp_id', value='rs6656401', type='text'),
html.Button(id='snp_submit', n_clicks=0, children='Submit')
], style={'display': 'inline-block', 'float': 'right'})
], style={
'borderBottom': 'thin lightgrey solid',
'backgroundColor': 'rgb(250, 250, 250)',
'padding': '10px 5px'
}),
dcc.Graph(id='assoc_plot'),
html.Div([
dt.DataTable(
id='table',
data=[],
columns=[{'id':cw[0],'name':cw[0]} for cw in column_widths],
style_cell_conditional=scc,
editable=False,
filtering=True,
sorting=True,
n_fixed_rows=20
)
])
])
# html.Div([
# dt.DataTable(
# id='table',
# data=[],
# columns=[cw[0] for cw in column_widths],
# style_cell_conditional=scc,
# editable=False,
# filtering=True,
# sorting=True,
# n_fixed_rows=20
# )
#############################################
# callback helpers
@memoized
def query_ld(population, snp_id):
try:
sad5.set_population(population)
except ValueError:
print('Population unavailable.', file=sys.stderr)
return pd.DataFrame()
    # look up the SNP first and bail out early if it is unknown, so snp_pos is
    # never called with a missing chromosome
    chrm, snp_i = sad5.snp_chr_index(snp_id)
    if chrm is None:
      return pd.DataFrame()
    else:
      pos = sad5.snp_pos(snp_i, chrm)
      return sad5.emerald_vcf.query_ld(snp_id, chrm, pos, ld_threshold=0.8)
@memoized
def read_sad(chrm, snp_i, verbose=True):
"""Read SAD scores from HDF5 for the given SNP index."""
if verbose:
print('Reading SAD!', file=sys.stderr)
# read SAD
snp_sad = sad5.chr_sad5[chrm][snp_i].astype('float64')
# read percentiles
snp_pct = sad5.chr_sad5[chrm].sad_pct(snp_sad)
return snp_sad, snp_pct
def snp_rows(snp_id, dataset, ld_r2=1., verbose=True):
"""Construct table rows for the given SNP id and its LD set
in the given dataset."""
rows = []
# search for SNP
# chrom, snp_i = snp_indexes.get(snp_id, (None,None))
chrm, snp_i = sad5.snp_chr_index(snp_id)
if chrm is not None:
# SAD
snp_sad, snp_pct = read_sad(chrm, snp_i)
# round floats
snp_sad = np.around(snp_sad,4)
snp_assoc = np.around(snp_sad*ld_r2, 4)
ld_r2_round = np.around(ld_r2, 4)
# extract target scores and info
for ti, tid in enumerate(sad5.target_ids):
if dataset == 'All' or sad5.target_labels[ti].startswith(dataset):
rows.append({
'SNP': snp_id,
'Association': snp_assoc[ti],
'Score': snp_sad[ti],
'ScoreQ': snp_pct[ti],
'R': ld_r2_round,
'Experiment': tid,
'Description': sad5.target_labels[ti]})
elif verbose:
print('Cannot find %s in snp_indexes.' % snp_id)
return rows
def make_data_mask(dataset):
"""Make a mask across targets for the given dataset."""
dataset_mask = []
for ti, tid in enumerate(sad5.target_ids):
if dataset == 'All':
dataset_mask.append(True)
else:
dataset_mask.append(sad5.target_labels[ti].startswith(dataset))
return np.array(dataset_mask, dtype='bool')
def snp_scores(snp_id, dataset, ld_r2=1.):
"""Compute an array of scores for this SNP
in the specified dataset."""
dataset_mask = make_data_mask(dataset)
scores = np.zeros(dataset_mask.sum(), dtype='float64')
# search for SNP
chrm, snp_i = sad5.snp_chr_index(snp_id)
if snp_i is not None:
# read SAD
snp_sad, _ = read_sad(chrm, snp_i)
# filter datasets
snp_sad = snp_sad[dataset_mask]
# add
scores += snp_sad*ld_r2
return scores
#############################################
# callbacks
@app.callback(
dd.Output('table', 'data'),
[dd.Input('snp_submit', 'n_clicks')],
[
dd.State('snp_id','value'),
dd.State('dataset','value'),
dd.State('population','value')
]
)
def update_table(n_clicks, snp_id, dataset, population, verbose=True):
"""Update the table with a new parameter set."""
if verbose:
print('Tabling')
# add snp_id rows
rows = snp_rows(snp_id, dataset)
if population != '-':
df_ld = query_ld(population, snp_id)
for i, v in df_ld.iterrows():
rows += snp_rows(v.snp, dataset, v.r)
return rows
@app.callback(
dd.Output('assoc_plot', 'figure'),
[dd.Input('snp_submit', 'n_clicks')],
[
dd.State('snp_id','value'),
dd.State('dataset','value'),
dd.State('population','value')
]
)
def update_plot(n_clicks, snp_id, dataset, population, verbose=True):
if verbose:
print('Plotting')
target_mask = make_data_mask(dataset)
# add snp_id rows
query_scores = snp_scores(snp_id, dataset)
if population != '-':
df_ld = query_ld(population, snp_id)
for i, v in df_ld.iterrows():
query_scores += snp_scores(v.snp, dataset, v.r)
# sort
sorted_indexes = np.argsort(query_scores)
# range
ymax = np.abs(query_scores).max()
ymax *= 1.2
return {
'data': [go.Scatter(
x=np.arange(len(query_scores)),
y=query_scores[sorted_indexes],
text=sad5.target_ids[target_mask][sorted_indexes],
mode='markers'
)],
'layout': {
'height': 400,
'margin': {'l': 20, 'b': 30, 'r': 10, 't': 10},
'yaxis': {'range': [-ymax,ymax]},
'xaxis': {'range': [-1,1+len(query_scores)]}
}
}
#############################################
# run
app.scripts.config.serve_locally = True
app.run_server(debug=False, port=8787)
class memoized(object):
'''Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
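# Editor's sketch (illustration only): how the memoized decorator above behaves. The
# wrapped function runs once per distinct hashable argument tuple; later calls with
# the same arguments return the cached value without re-evaluating the body.
@memoized
def _example_square(x):
  print('computing %d^2' % x)
  return x * x

# _example_square(4)  -> prints once and returns 16
# _example_square(4)  -> cache hit, returns 16 without printing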
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
|
apache-2.0
| 4,630,189,006,834,960,000
| 30.068571
| 86
| 0.469101
| false
| 3.84648
| false
| false
| false
|
OpenTTD-Ladder/ladder-web
|
ladder/matchmaking/admin.py
|
1
|
1149
|
from django.contrib import admin
from translations.admin import TranslationInline
from .models import Ladder, LadderTranslation
class LadderTranslationAdmin(TranslationInline):
model = LadderTranslation
class LadderAdmin(admin.ModelAdmin):
inlines = [LadderTranslationAdmin]
fieldsets = (
(None, {'fields': [('max_slots', 'signup_confirm'),]}),
('Dates', {'fields': [('ladder_start', 'ladder_ends'),]}),
('Signup', {'fields': [('signup_start', 'signup_ends'),]}),
('Rating', {'fields': [('default_mu', 'default_draw'),
('default_sigma', 'default_beta'),
'default_tau']})
)
list_display = [
'translation',
'max_slots',
'is_active',
'is_signup',
'signup_confirm',
]
def get_readonly_fields(self, request, obj=None):
if obj is not None and obj.pk:
return self.readonly_fields + ('default_mu', 'default_sigma',
'default_beta', 'default_tau', 'default_draw',)
return self.readonly_fields
admin.site.register(Ladder, LadderAdmin)
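# Editor's sketch (illustration only, not wired into the admin): get_readonly_fields()
# above makes the rating defaults editable on the add form but read-only on the change
# form once a Ladder has been saved. 'request' and 'ladder' are hypothetical arguments.
def _example_readonly_fields(request, ladder=None):
    admin_instance = LadderAdmin(Ladder, admin.site)
    # Returns the base readonly fields for an unsaved object, and adds the
    # default_* rating fields once the object has a primary key.
    return admin_instance.get_readonly_fields(request, obj=ladder)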
|
gpl-2.0
| -7,043,679,489,068,449,000
| 31.857143
| 74
| 0.575283
| false
| 4.017483
| false
| false
| false
|
JshWright/home-assistant
|
homeassistant/components/notify/telegram.py
|
2
|
6078
|
"""
Telegram platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.telegram/
"""
import io
import logging
import urllib
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_DATA, PLATFORM_SCHEMA, BaseNotificationService)
from homeassistant.const import (
CONF_API_KEY, ATTR_LOCATION, ATTR_LATITUDE, ATTR_LONGITUDE)
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['python-telegram-bot==5.3.1']
ATTR_PHOTO = 'photo'
ATTR_KEYBOARD = 'keyboard'
ATTR_DOCUMENT = 'document'
ATTR_CAPTION = 'caption'
ATTR_URL = 'url'
ATTR_FILE = 'file'
ATTR_USERNAME = 'username'
ATTR_PASSWORD = 'password'
CONF_CHAT_ID = 'chat_id'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_CHAT_ID): cv.string,
})
def get_service(hass, config, discovery_info=None):
"""Get the Telegram notification service."""
import telegram
try:
chat_id = config.get(CONF_CHAT_ID)
api_key = config.get(CONF_API_KEY)
bot = telegram.Bot(token=api_key)
username = bot.getMe()['username']
_LOGGER.debug("Telegram bot is '%s'", username)
except urllib.error.HTTPError:
_LOGGER.error("Please check your access token")
return None
return TelegramNotificationService(api_key, chat_id)
def load_data(url=None, file=None, username=None, password=None):
"""Load photo/document into ByteIO/File container from a source."""
try:
if url is not None:
# Load photo from URL
if username is not None and password is not None:
req = requests.get(url, auth=(username, password), timeout=15)
else:
req = requests.get(url, timeout=15)
return io.BytesIO(req.content)
elif file is not None:
# Load photo from file
return open(file, "rb")
else:
_LOGGER.warning("Can't load photo no photo found in params!")
except OSError as error:
_LOGGER.error("Can't load photo into ByteIO: %s", error)
return None
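# Editor's sketch (hypothetical URL, credentials, and path): load_data() returns a
# BytesIO object for a URL source (optionally fetched with HTTP basic auth) or an
# open file handle for a local path; the result is passed straight to sendPhoto or
# sendDocument below.
def _example_load_data_usage():
    from_url = load_data(url='https://example.com/snapshot.jpg',
                         username='camera', password='secret')
    from_file = load_data(file='/tmp/snapshot.jpg')
    return from_url, from_file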
class TelegramNotificationService(BaseNotificationService):
"""Implement the notification service for Telegram."""
def __init__(self, api_key, chat_id):
"""Initialize the service."""
import telegram
self._api_key = api_key
self._chat_id = chat_id
self.bot = telegram.Bot(token=self._api_key)
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
import telegram
title = kwargs.get(ATTR_TITLE)
data = kwargs.get(ATTR_DATA)
        # Check whether data carries a photo/location/document/keyboard payload
if data is not None and ATTR_PHOTO in data:
photos = data.get(ATTR_PHOTO, None)
photos = photos if isinstance(photos, list) else [photos]
for photo_data in photos:
self.send_photo(photo_data)
return
elif data is not None and ATTR_LOCATION in data:
return self.send_location(data.get(ATTR_LOCATION))
elif data is not None and ATTR_DOCUMENT in data:
return self.send_document(data.get(ATTR_DOCUMENT))
elif data is not None and ATTR_KEYBOARD in data:
keys = data.get(ATTR_KEYBOARD)
keys = keys if isinstance(keys, list) else [keys]
return self.send_keyboard(message, keys)
if title:
text = '{} {}'.format(title, message)
else:
text = message
parse_mode = telegram.parsemode.ParseMode.MARKDOWN
# Send message
try:
self.bot.sendMessage(
chat_id=self._chat_id, text=text, parse_mode=parse_mode)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending message")
def send_keyboard(self, message, keys):
"""Display keyboard."""
import telegram
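        # Each entry in keys is a comma-separated row of button labels for the reply keyboard.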
keyboard = telegram.ReplyKeyboardMarkup([
[key.strip() for key in row.split(",")] for row in keys])
try:
self.bot.sendMessage(
chat_id=self._chat_id, text=message, reply_markup=keyboard)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending message")
def send_photo(self, data):
"""Send a photo."""
import telegram
caption = data.get(ATTR_CAPTION)
# Send photo
try:
photo = load_data(
url=data.get(ATTR_URL),
file=data.get(ATTR_FILE),
username=data.get(ATTR_USERNAME),
password=data.get(ATTR_PASSWORD),
)
self.bot.sendPhoto(
chat_id=self._chat_id, photo=photo, caption=caption)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending photo")
def send_document(self, data):
"""Send a document."""
import telegram
caption = data.get(ATTR_CAPTION)
        # Send document
try:
document = load_data(
url=data.get(ATTR_URL),
file=data.get(ATTR_FILE),
username=data.get(ATTR_USERNAME),
password=data.get(ATTR_PASSWORD),
)
self.bot.sendDocument(
chat_id=self._chat_id, document=document, caption=caption)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending document")
def send_location(self, gps):
"""Send a location."""
import telegram
latitude = float(gps.get(ATTR_LATITUDE, 0.0))
longitude = float(gps.get(ATTR_LONGITUDE, 0.0))
# Send location
try:
self.bot.sendLocation(
chat_id=self._chat_id, latitude=latitude, longitude=longitude)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending location")
|
apache-2.0
| -8,644,028,738,815,384,000
| 31.15873
| 78
| 0.607108
| false
| 3.996055
| false
| false
| false
|
wangjeaf/CSSCheckStyle
|
setup.py
|
1
|
2417
|
#!/usr/bin/env python
#
# Copyright 2012 The CSSCheckStyle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
try:
from setuptools import setup
except ImportError:
    print '[CKstyle] ERROR: please install "easy_install" first'
    sys.exit(1)
from distutils.command.install_data import install_data
import os
cmdclasses = {'install_data': install_data}
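# Split a pathname into its components in a platform-neutral way (the inverse of os.path.join).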
def fullsplit(path, result=None):
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
ckstyle_dir = 'ckstyle'
for dirpath, dirnames, filenames in os.walk(ckstyle_dir):
for i, dirname in enumerate(dirnames):
if dirname.startswith('.') or dirname.startswith('_') : del dirnames[i]
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)))
elif filenames:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
setup(
name = "CSSCheckStyle",
version = '1.0.0',
url = 'https://github.com/wangjeaf/CSSCheckStyle',
author = 'wangjeaf',
author_email = 'wangjeaf@gmail.com',
description = 'Check Code Style and more, for CSS.',
download_url = 'https://github.com/wangjeaf/CSSCheckStyle/archive/master.tar.gz',
install_requires=['python-gflags'],
packages = packages,
cmdclass = cmdclasses,
data_files = data_files,
entry_points = {
'console_scripts': [
'ckstyle = ckstyle.command.index:ckstyle'
]
},
classifiers = ['Intended Audience :: Developers',
'Programming Language :: Python',
'Topic :: Software Development :: CSS'
],
)
|
bsd-3-clause
| -7,363,632,864,279,290,000
| 31.226667
| 85
| 0.65784
| false
| 3.747287
| false
| false
| false
|
ibackus/diskpy
|
diskpy/ICgen/sigma_profile.py
|
1
|
9111
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 23 10:17:53 2014
@author: ibackus
"""
# External modules
import numpy as np
import pynbody
SimArray = pynbody.array.SimArray
# diskpy modules
from diskpy.pdmath import smoothstep
from diskpy.utils import match_units
def make_profile(ICobj):
"""
A wrapper for generating surface density profiles according to the IC object.
Settings for the profile are defined in ICobj.settings. Which profile gets
used is defined by ICobj.settings.sigma.kind
Currently available kinds are:
viscous
powerlaw
MQWS
**RETURNS**
r : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
kind = ICobj.settings.sigma.kind
if kind == 'powerlaw':
r, sigma = powerlaw(ICobj.settings, ICobj.T)
elif (kind == 'mqws') | (kind == 'MQWS'):
r, sigma = MQWS(ICobj.settings, ICobj.T)
elif (kind == 'viscous'):
r, sigma = viscous(ICobj.settings)
elif (kind == 'gaussring'):
r, sigma = gaussian_ring(ICobj.settings)
else:
        raise TypeError('Could not make profile for kind {0}'.format(kind))
if hasattr(ICobj.settings.sigma, 'innercut'):
sigma = _applycut(r, sigma, ICobj.settings.sigma.innercut, False)
if hasattr(ICobj.settings.sigma, 'outercut'):
sigma = _applycut(r, sigma, ICobj.settings.sigma.outercut, True)
return r, sigma
def _applycut(r, sigma, rcut, outer=True):
"""
Applies a hard cut to a surface density profile (sigma). If outer=True,
sigma = 0 at r > rcut. Otherwise, sigma = 0 at r < rcut. If rcut is
None, inf, or nan no cut is performed.
"""
if rcut is None:
return sigma
elif np.isnan(rcut) or np.isinf(rcut):
return sigma
if outer:
mask = r > rcut
else:
mask = r < rcut
if np.any(mask):
sigma[mask] = 0
return sigma
def gaussian_ring(settings):
"""
    Generates a Gaussian ring surface density profile according to:
    .. math:: \\Sigma = \\Sigma_0 \\exp(-(R-R_d)^2/(2a^2))
    .. math:: \\Sigma_0 = M_d/((2\\pi)^{3/2} a R_d)
    Here a is called the ringwidth.
The max radius is determined automatically
Parameters
----------
settings : IC settings
settings like those contained in an IC object (see ICgen_settings.py)
Returns
-------
R : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
Rd = settings.sigma.Rd
ringwidth = settings.sigma.ringwidth
n_points = settings.sigma.n_points
m_disk = settings.sigma.m_disk
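    # Sample out to 5 ring widths past Rd (at least 2*Rd) so the Gaussian tail is resolved.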
Rmax = (Rd + 5*ringwidth).in_units(Rd.units)
Rmax = max(Rmax, Rd*2.0)
R = SimArray(np.linspace(0, Rmax, n_points), Rd.units)
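    # Normalization constant: sigma0 = m_disk / ((2*pi)**1.5 * ringwidth * Rd)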
sigma0 = m_disk / (ringwidth * Rd)
sigma0 *= (2*np.pi)**-1.5
expArg = -(R-Rd)**2 / (2*ringwidth**2)
expArg.convert_units('1')
sigma = sigma0 * np.exp(expArg)
return R, sigma
def viscous(settings):
"""
Generates a surface density profile derived from a self-similarity solution
for a viscous disk, according to:
sigma ~ r^-gamma exp(-r^(2-gamma))
Where r is a dimensionless radius and gamma is a constant less than 2.
Rd (disk radius) is defined as the radius containing 95% of the disk mass
**ARGUMENTS**
settings : IC settings
settings like those contained in an IC object (see ICgen_settings.py)
**RETURNS**
R : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
Rd = settings.sigma.Rd
rin = settings.sigma.rin
rmax = settings.sigma.rmax
n_points = settings.sigma.n_points
gamma = settings.sigma.gamma
m_disk = settings.sigma.m_disk
# Define the fraction of mass contained within Rd
A = 0.95
    # Radial normalization R1, chosen so that a fraction A of the disk mass lies inside Rd:
    # 1 - exp(-(Rd/R1)**(2-gamma)) = A
R1 = Rd / (np.log(1/(1-A))**(1/(2-gamma)))
Rmax = rmax * Rd
Rin = rin * Rd
R = np.linspace(0, Rmax, n_points)
r = (R/R1).in_units('1')
sigma = (r**-gamma) * np.exp(-r**(2-gamma)) * (m_disk/(2*np.pi*R1*R1)) * (2-gamma)
# Deal with infinities at the origin with a hard cut off
sigma[0] = sigma[1]
# Apply interior cutoff
cut_mask = R < Rin
if np.any(cut_mask):
sigma[cut_mask] *= smoothstep(r[cut_mask],degree=21,rescale=True)
return R, sigma
def powerlaw(settings, T = None):
"""
Generates a surface density profile according to a powerlaw sigma ~ r^p
with a smooth interior cutoff and smooth exterior exponential cutoff.
**ARGUMENTS**
settings : IC settings
settings like those contained in an IC object (see ICgen_settings.py)
T : callable function
Function that returns temperature of the disk as a function of radius
        If None, a powerlaw temperature is assumed
**RETURNS**
R : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
# Parse settings
Rd = settings.sigma.Rd
rin = settings.sigma.rin
rmax = settings.sigma.rmax
cutlength = settings.sigma.cutlength
Mstar = settings.physical.M
Qmin = settings.sigma.Qmin
n_points = settings.sigma.n_points
m = settings.physical.m
power = settings.sigma.power
gamma = settings.physical.gamma_cs()
if T is None:
# If no callable object to calculate Temperature(R) is provided,
# default to a powerlaw T ~ R^-q
T0 = SimArray([129.0],'K') # Temperature at 1 AU
R0 = SimArray([1.0],'au')
q = 0.59
def T(x):
return T0 * np.power((x/R0).in_units('1'),-q)
Rd = match_units(pynbody.units.au, Rd)[1]
Mstar = match_units(pynbody.units.Msol, Mstar)[1]
# Molecular weight
m = match_units(m, pynbody.units.m_p)[0]
# Maximum R to calculate sigma at (needed for the exponential cutoff region)
Rmax = rmax*Rd
# Q calculation parameters:
G = SimArray([1.0],'G')
kB = SimArray([1.0],'k')
    # Initial (arbitrary) surface density normalization; rescaled below to satisfy the minimum-Q constraint
A = SimArray(1.0,'Msol')/(2*np.pi*np.power(Rd,2))
# dflemin3 Nov. 4, 2015
# Made units more explicit via SimArrays
r_units = Rd.units
R = SimArray(np.linspace(0,Rmax,n_points),r_units)
r = R/Rd
# Calculate sigma
# Powerlaw
#dflemin3 edit 06/10/2015: Try powerlaw of the form sigma ~ r^power
sigma = A*np.power(r,power)
sigma[0] = 0.0
# Exterior cutoff
sigma[r>1] *= np.exp(-(r[r>1] - 1)**2 / (2*cutlength**2))
# Interior cutoff
sigma[r<rin] *= smoothstep(r[r<rin],degree=21,rescale=True)
    # Calculate the Toomre Q parameter: Q = cs*Omega/(pi*G*sigma),
    # with cs = sqrt(gamma*kB*T/m) and Keplerian Omega = sqrt(G*Mstar/R**3)
Q = np.sqrt(Mstar*gamma*kB*T(R)/(G*m*R**3))/(np.pi*sigma)
Q.convert_units('1')
# Rescale sigma to meet the minimum Q requirement
sigma *= Q.min()/Qmin
    # Recompute Q for the rescaled profile (its minimum now equals Qmin)
Q = np.sqrt(Mstar*gamma*kB*T(R)/(G*m*R**3))/(np.pi*sigma)
Q.convert_units('1')
return R, sigma
def MQWS(settings, T):
"""
    Generates a surface density profile as per the method used in Mayer, Quinn,
Wadsley, and Stadel 2004
** ARGUMENTS **
NOTE: if units are not supplied, assumed units are AU, Msol
settings : IC settings
settings like those contained in an IC object (see ICgen_settings.py)
T : callable
A function to calculate temperature as a function of radius
** RETURNS **
r : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
# Q calculation parameters:
G = SimArray([1.0],'G')
kB = SimArray([1.0],'k')
# Load in settings
n_points = settings.sigma.n_points
rin = settings.sigma.rin
rout = settings.sigma.rout
rmax = settings.sigma.rmax
Qmin = settings.sigma.Qmin
m = settings.physical.m
Mstar = settings.physical.M
#m_disk = settings.sigma.m_disk
rin = match_units(pynbody.units.au, rin)[1]
rout = match_units(pynbody.units.au, rout)[1]
#m_disk = match_units(pynbody.units.Msol, m_disk)[1]
if rmax is None:
rmax = 2.5 * rout
else:
rmax = match_units(pynbody.units.au, rmax)[1]
r = np.linspace(0, rmax, n_points)
a = (rin/r).in_units('1')
b = (r/rout).in_units('1')
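    # Surface density tapered at both edges: sigma ~ exp(-(rin/r)**2 - (r/rout)**2) / r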
sigma = (np.exp(-a**2 - b**2)/r) * Mstar.units/r.units
# Calculate Q
Q = np.sqrt(Mstar*kB*T(r)/(G*m*r**3))/(np.pi*sigma)
Q.convert_units('1')
sigma *= np.nanmin(Q)/Qmin
# Remove all nans
sigma[np.isnan(sigma)] = 0.0
return r, sigma
|
mit
| -394,328,247,553,542,200
| 25.955621
| 89
| 0.584897
| false
| 3.41236
| false
| false
| false
|
erigones/esdc-ce
|
api/dc/template/api_views.py
|
1
|
3081
|
from django.utils.translation import ugettext_noop as _
from api import status
from api.api_views import APIView
from api.exceptions import PreconditionRequired, ObjectAlreadyExists
from api.task.response import SuccessTaskResponse
from api.utils.db import get_object
from api.dc.utils import remove_dc_binding_virt_object
from api.dc.template.serializers import TemplateSerializer
from api.dc.messages import LOG_TEMPLATE_ATTACH, LOG_TEMPLATE_DETACH
from api.template.messages import LOG_TEMPLATE_UPDATE
from vms.models import VmTemplate
class DcTemplateView(APIView):
serializer = TemplateSerializer
order_by_default = order_by_fields = ('name',)
def __init__(self, request, name, data):
super(DcTemplateView, self).__init__(request)
self.data = data
self.name = name
if name:
attrs = {'name': name}
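            # For anything other than POST the template must already be attached to the request DC.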
if request.method != 'POST':
attrs['dc'] = request.dc
self.vmt = get_object(request, VmTemplate, attrs, sr=('owner', 'dc_bound'), exists_ok=True,
noexists_fail=True)
else:
self.vmt = VmTemplate.objects.select_related('owner', 'dc_bound').filter(dc=request.dc)\
.exclude(access__in=VmTemplate.INVISIBLE)\
.order_by(*self.order_by)
def get(self, many=False):
if many or not self.name:
if self.full:
if self.vmt:
res = self.serializer(self.request, self.vmt, many=True).data
else:
res = []
else:
res = list(self.vmt.values_list('name', flat=True))
else:
res = self.serializer(self.request, self.vmt).data
return SuccessTaskResponse(self.request, res)
def _remove_dc_binding(self, res):
if self.vmt.dc_bound:
remove_dc_binding_virt_object(res.data.get('task_id'), LOG_TEMPLATE_UPDATE, self.vmt,
user=self.request.user)
def post(self):
dc, vmt = self.request.dc, self.vmt
if vmt.dc.filter(id=dc.id).exists():
raise ObjectAlreadyExists(model=VmTemplate)
ser = self.serializer(self.request, vmt)
vmt.dc.add(dc)
res = SuccessTaskResponse(self.request, ser.data, obj=vmt, status=status.HTTP_201_CREATED,
detail_dict=ser.detail_dict(), msg=LOG_TEMPLATE_ATTACH)
self._remove_dc_binding(res)
return res
def delete(self):
dc, vmt = self.request.dc, self.vmt
if dc.vm_set.filter(template=vmt).exists():
raise PreconditionRequired(_('Template is used by some VMs'))
ser = self.serializer(self.request, vmt)
vmt.dc.remove(dc)
res = SuccessTaskResponse(self.request, None, obj=vmt, detail_dict=ser.detail_dict(), msg=LOG_TEMPLATE_DETACH)
self._remove_dc_binding(res)
return res
|
apache-2.0
| 5,641,864,116,140,040,000
| 37.037037
| 119
| 0.586173
| false
| 3.919847
| false
| false
| false
|
isralopez/geonode
|
setup.py
|
1
|
5140
|
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
import os
import sys
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
# Tell distutils not to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
geonode_dir = 'geonode'
for dirpath, dirnames, filenames in os.walk(geonode_dir):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)))
elif filenames:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
setup(name='GeoNode',
version=__import__('geonode').get_version(),
description="Application for serving and sharing geospatial data",
long_description=open('README').read(),
classifiers=[
"Development Status :: 4 - Beta"],
keywords='',
author='GeoNode Developers',
author_email='dev@geonode.org',
url='http://geonode.org',
license='GPL',
packages=packages,
data_files=data_files,
install_requires=[
## The commented name next to the package
## is the Ubuntu 14.04 package that provides it.
## Apps with official Ubuntu 14.04 packages
# native dependencies
"pillow", # python-pillow
"lxml", # python-lxml
# "psycopg2==2.4.5", # python-psycopg2
"Django >=1.6.1, <=1.6.5", # python-django
# Other
"beautifulsoup4==4.2.1", # python-bs4
"MultipartPostHandler==0.1.0", # python-multipartposthandler
"httplib2==0.8", # python-httplib2
"transifex-client==0.10", # transifex-client
"Paver==1.2.1", # python-paver
"nose <=1.0, <=1.3.1", # python-nose
"django-nose==1.2", # python-django-nose
# Django Apps
"django-pagination >=1.0.5, <=1.0.7", # python-django-pagination
"django-jsonfield==0.9.12", # python-django-jsonfield
"django-extensions==1.2.5", # python-django-extensions
"django-taggit==0.12", # python-django-taggit
"django-mptt==0.6.1", # django-mptt
"django-guardian==1.2.0", #django-guardian
# "django-admin-bootstrapped==1.6.5", #django-admin-bootstrapped
## Apps with packages provided in GeoNode's PPA on Launchpad.
"pinax-theme-bootstrap==3.0a11",
"pinax-theme-bootstrap-account==1.0b2",
"django-forms-bootstrap==2.0.3.post1",
"django-friendly-tag-loader==1.1",
"django-taggit-templatetags==0.4.6dev",
"django-activity-stream==0.4.5beta1",
"django-downloadview==1.2",
"django-tastypie==0.11.0",
"django-polymorphic==0.5.3",
"django-leaflet==0.13.2",
"django-autocomplete-light==1.4.13",
"django-modeltranslation",
# GeoNode org maintained apps.
"django-geoexplorer==4.0.4",
"geonode-user-messages==0.1.1",
"geonode-avatar==2.1.3",
"geonode-announcements==1.0.3",
"geonode-agon-ratings==0.3.1",
"geonode-user-accounts==1.0.3",
"geonode-arcrest==10.2",
"geonode-notification==1.1.1",
"geonode-dialogos==0.4",
"gsconfig==0.6.10",
"gsimporter==0.1",
"gisdata==0.5.4",
# GeoPython dependencies
"OWSLib >=0.7.2, <=0.8.7",
"pycsw >=1.6.4, <=1.8.2",
# haystack/elasticsearch, uncomment to use
"django-haystack==2.1.0",
"pyelasticsearch==0.6.1"
],
zip_safe=False,
)
|
gpl-3.0
| -7,154,889,025,022,198,000
| 35.197183
| 104
| 0.609728
| false
| 3.482385
| false
| false
| false
|
hradec/cortex
|
test/IECoreHoudini/FromHoudiniPointsConverter.py
|
1
|
49213
|
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou
import IECore
import IECoreHoudini
import unittest
import os
import math
class TestFromHoudiniPointsConverter( IECoreHoudini.TestCase ) :
def createBox( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
box = geo.createNode( "box" )
return box
def createTorus( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
torus = geo.createNode( "torus" )
torus.parm( "rows" ).set( 10 )
torus.parm( "cols" ).set( 10 )
return torus
def createPoints( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
box = geo.createNode( "box" )
facet = geo.createNode( "facet" )
facet.parm("postnml").set(True)
points = geo.createNode( "scatter" )
points.parm( "npts" ).set( 5000 )
facet.setInput( 0, box )
points.setInput( 0, facet )
return points
# creates a converter
def testCreateConverter( self ) :
box = self.createBox()
converter = IECoreHoudini.FromHoudiniPointsConverter( box )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
return converter
# creates a converter
def testFactory( self ) :
box = self.createBox()
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box, resultType = IECore.TypeId.PointsPrimitive )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box, resultType = IECore.TypeId.Parameter )
self.assertEqual( converter, None )
self.failUnless( IECore.TypeId.PointsPrimitive in IECoreHoudini.FromHoudiniGeometryConverter.supportedTypes() )
converter = IECoreHoudini.FromHoudiniGeometryConverter.createDummy( IECore.TypeId.PointsPrimitive )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
converter = IECoreHoudini.FromHoudiniGeometryConverter.createDummy( [ IECore.TypeId.PointsPrimitive ] )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
# performs geometry conversion
def testDoConversion( self ) :
converter = self.testCreateConverter()
result = converter.convert()
self.assert_( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) )
def testConvertFromHOMGeo( self ) :
geo = self.createPoints().geometry()
converter = IECoreHoudini.FromHoudiniGeometryConverter.createFromGeo( geo )
self.failUnless( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
self.failUnless( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) )
converter2 = IECoreHoudini.FromHoudiniGeometryConverter.createFromGeo( geo, IECore.TypeId.PointsPrimitive )
self.failUnless( converter2.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
# convert a mesh
def testConvertMesh( self ) :
torus = self.createTorus()
converter = IECoreHoudini.FromHoudiniPointsConverter( torus )
result = converter.convert()
self.assertEqual( result.typeId(), IECore.PointsPrimitive.staticTypeId() )
bbox = result.bound()
self.assertEqual( bbox.min.x, -2.0 )
self.assertEqual( bbox.max.x, 2.0 )
self.assertEqual( result.numPoints, 100 )
for i in range( result.numPoints ) :
self.assert_( result["P"].data[i].x >= bbox.min.x )
self.assert_( result["P"].data[i].x <= bbox.max.x )
# test prim/vertex attributes
def testConvertPrimVertAttributes( self ) :
torus = self.createTorus()
geo = torus.parent()
# add vertex normals
facet = geo.createNode( "facet", node_name = "add_point_normals" )
facet.parm("postnml").set(True)
facet.setInput( 0, torus )
# add a primitive colour attributes
primcol = geo.createNode( "primitive", node_name = "prim_colour" )
primcol.parm("doclr").set(1)
primcol.parm("diffr").setExpression("rand($PR)")
primcol.parm("diffg").setExpression("rand($PR+1)")
primcol.parm("diffb").setExpression("rand($PR+2)")
primcol.setInput( 0, facet )
# add a load of different vertex attributes
vert_f1 = geo.createNode( "attribcreate", node_name = "vert_f1", exact_type_name=True )
vert_f1.parm("name").set("vert_f1")
vert_f1.parm("class").set(3)
vert_f1.parm("value1").setExpression("$VTX*0.1")
vert_f1.setInput( 0, primcol )
vert_f2 = geo.createNode( "attribcreate", node_name = "vert_f2", exact_type_name=True )
vert_f2.parm("name").set("vert_f2")
vert_f2.parm("class").set(3)
vert_f2.parm("size").set(2)
vert_f2.parm("value1").setExpression("$VTX*0.1")
vert_f2.parm("value2").setExpression("$VTX*0.1")
vert_f2.setInput( 0, vert_f1 )
vert_f3 = geo.createNode( "attribcreate", node_name = "vert_f3", exact_type_name=True )
vert_f3.parm("name").set("vert_f3")
vert_f3.parm("class").set(3)
vert_f3.parm("size").set(3)
vert_f3.parm("value1").setExpression("$VTX*0.1")
vert_f3.parm("value2").setExpression("$VTX*0.1")
vert_f3.parm("value3").setExpression("$VTX*0.1")
vert_f3.setInput( 0, vert_f2 )
vert_i1 = geo.createNode( "attribcreate", node_name = "vert_i1", exact_type_name=True )
vert_i1.parm("name").set("vert_i1")
vert_i1.parm("class").set(3)
vert_i1.parm("type").set(1)
vert_i1.parm("value1").setExpression("$VTX*0.1")
vert_i1.setInput( 0, vert_f3 )
vert_i2 = geo.createNode( "attribcreate", node_name = "vert_i2", exact_type_name=True )
vert_i2.parm("name").set("vert_i2")
vert_i2.parm("class").set(3)
vert_i2.parm("type").set(1)
vert_i2.parm("size").set(2)
vert_i2.parm("value1").setExpression("$VTX*0.1")
vert_i2.parm("value2").setExpression("$VTX*0.1")
vert_i2.setInput( 0, vert_i1 )
vert_i3 = geo.createNode( "attribcreate", node_name = "vert_i3", exact_type_name=True )
vert_i3.parm("name").set("vert_i3")
vert_i3.parm("class").set(3)
vert_i3.parm("type").set(1)
vert_i3.parm("size").set(3)
vert_i3.parm("value1").setExpression("$VTX*0.1")
vert_i3.parm("value2").setExpression("$VTX*0.1")
vert_i3.parm("value3").setExpression("$VTX*0.1")
vert_i3.setInput( 0, vert_i2 )
vert_v3f = geo.createNode( "attribcreate", node_name = "vert_v3f", exact_type_name=True )
vert_v3f.parm("name").set("vert_v3f")
vert_v3f.parm("class").set(3)
vert_v3f.parm("type").set(2)
vert_v3f.parm("value1").setExpression("$VTX*0.1")
vert_v3f.parm("value2").setExpression("$VTX*0.1")
vert_v3f.parm("value3").setExpression("$VTX*0.1")
vert_v3f.setInput( 0, vert_i3 )
detail_i3 = geo.createNode( "attribcreate", node_name = "detail_i3", exact_type_name=True )
detail_i3.parm("name").set("detail_i3")
detail_i3.parm("class").set(0)
detail_i3.parm("type").set(1)
detail_i3.parm("size").set(3)
detail_i3.parm("value1").set(123)
detail_i3.parm("value2").set(456.789) # can we catch it out with a float?
detail_i3.parm("value3").set(789)
detail_i3.setInput( 0, vert_v3f )
out = geo.createNode( "null", node_name="OUT" )
out.setInput( 0, detail_i3 )
# convert it all
converter = IECoreHoudini.FromHoudiniPointsConverter( out )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
self.assert_( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) )
bbox = result.bound()
self.assertEqual( bbox.min.x, -2.0 )
self.assertEqual( bbox.max.x, 2.0 )
self.assertEqual( result.numPoints, 100 )
for i in range( result.numPoints ) :
self.assert_( result["P"].data[i].x >= bbox.min.x )
self.assert_( result["P"].data[i].x <= bbox.max.x )
# test point attributes
self.assert_( "P" in result )
self.assertEqual( result['P'].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result['P'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result['P'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assert_( "N" in result )
self.assertEqual( result['N'].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result['N'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result['N'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
# test detail attributes
self.assert_( "detail_i3" in result )
self.assertEqual( result['detail_i3'].data.typeId(), IECore.TypeId.V3iData )
self.assertEqual( result['detail_i3'].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result['detail_i3'].data.value.x, 123 )
self.assertEqual( result['detail_i3'].data.value.y, 456 )
self.assertEqual( result['detail_i3'].data.value.z, 789 )
# test primitive attributes
self.assert_( "Cd" not in result )
# test vertex attributes
attrs = [ "vert_f1", "vert_f2", "vert_f3", "vert_i1", "vert_i2", "vert_i3", "vert_v3f" ]
for a in attrs :
self.assert_( a not in result )
self.assert_( result.arePrimitiveVariablesValid() )
# test prim/vertex attributes on a single primitive (mesh)
def testConvertMeshPrimVertAttributes( self ) :
torus = self.createTorus()
torus.parm( "type" ).set( 1 )
geo = torus.parent()
# add vertex normals
facet = geo.createNode( "facet", node_name = "add_point_normals" )
facet.parm("postnml").set(True)
facet.setInput( 0, torus )
# add a primitive colour attributes
primcol = geo.createNode( "primitive", node_name = "prim_colour" )
primcol.parm("doclr").set(1)
primcol.parm("diffr").setExpression("rand($PR)")
primcol.parm("diffg").setExpression("rand($PR+1)")
primcol.parm("diffb").setExpression("rand($PR+2)")
primcol.setInput( 0, facet )
# add a load of different vertex attributes
vert_f1 = geo.createNode( "attribcreate", node_name = "vert_f1", exact_type_name=True )
vert_f1.parm("name").set("vert_f1")
vert_f1.parm("class").set(3)
vert_f1.parm("value1").setExpression("$VTX*0.1")
vert_f1.setInput( 0, primcol )
vert_f2 = geo.createNode( "attribcreate", node_name = "vert_f2", exact_type_name=True )
vert_f2.parm("name").set("vert_f2")
vert_f2.parm("class").set(3)
vert_f2.parm("size").set(2)
vert_f2.parm("value1").setExpression("$VTX*0.1")
vert_f2.parm("value2").setExpression("$VTX*0.1")
vert_f2.setInput( 0, vert_f1 )
vert_f3 = geo.createNode( "attribcreate", node_name = "vert_f3", exact_type_name=True )
vert_f3.parm("name").set("vert_f3")
vert_f3.parm("class").set(3)
vert_f3.parm("size").set(3)
vert_f3.parm("value1").setExpression("$VTX*0.1")
vert_f3.parm("value2").setExpression("$VTX*0.1")
vert_f3.parm("value3").setExpression("$VTX*0.1")
vert_f3.setInput( 0, vert_f2 )
vert_quat = geo.createNode( "attribcreate", node_name = "vert_quat", exact_type_name=True )
vert_quat.parm("name").set("orient")
vert_quat.parm("class").set(3)
vert_quat.parm("size").set(4)
vert_quat.parm("value1").setExpression("$VTX*0.1")
vert_quat.parm("value2").setExpression("$VTX*0.2")
vert_quat.parm("value3").setExpression("$VTX*0.3")
vert_quat.parm("value4").setExpression("$VTX*0.4")
vert_quat.setInput( 0, vert_f3 )
vert_quat2 = geo.createNode( "attribcreate", node_name = "vert_quat2", exact_type_name=True )
vert_quat2.parm("name").set("quat_2")
vert_quat2.parm("class").set(3)
vert_quat2.parm("size").set(4)
vert_quat2.parm("typeinfo").set(6) # set type info to quaternion
vert_quat2.parm("value1").setExpression("$VTX*0.2")
vert_quat2.parm("value2").setExpression("$VTX*0.4")
vert_quat2.parm("value3").setExpression("$VTX*0.6")
vert_quat2.parm("value4").setExpression("$VTX*0.8")
vert_quat2.setInput( 0, vert_quat )
vert_m44create = geo.createNode( "attribcreate", node_name = "vert_m44create", exact_type_name=True )
vert_m44create.parm("name").set("m44")
vert_m44create.parm("class").set(3)
vert_m44create.parm("size").set(16)
vert_m44create.parm("typeinfo").set(7) # set type info to transformation matrix
vert_m44create.setInput( 0, vert_quat2 )
vert_m44 = geo.createNode( "attribwrangle", node_name = "vert_m44", exact_type_name=True )
vert_m44.parm("snippet").set("4@m44 = maketransform(0,0,{ 10, 20, 30 },{ 30, 45, 60},{ 3, 4, 5 },{ 0, 0, 0 });")
vert_m44.parm("class").set(3)
vert_m44.setInput( 0, vert_m44create )
vert_m33create = geo.createNode( "attribcreate", node_name = "vert_m33create", exact_type_name=True )
vert_m33create.parm("name").set("m33")
vert_m33create.parm("class").set(3)
vert_m33create.parm("size").set(9)
vert_m33create.setInput( 0, vert_m44 )
vert_m33 = geo.createNode( "attribwrangle", node_name = "vert_m33", exact_type_name=True )
vert_m33.parm("snippet").set("3@m33 = matrix3(maketransform(0,0,{ 0, 0, 0 },{ 30, 45, 60},{ 3, 4, 5 },{ 0, 0, 0 }));")
vert_m33.parm("class").set(3)
vert_m33.setInput( 0, vert_m33create )
vert_i1 = geo.createNode( "attribcreate", node_name = "vert_i1", exact_type_name=True )
vert_i1.parm("name").set("vert_i1")
vert_i1.parm("class").set(3)
vert_i1.parm("type").set(1)
vert_i1.parm("value1").setExpression("$VTX*0.1")
vert_i1.setInput( 0, vert_m33 )
vert_i2 = geo.createNode( "attribcreate", node_name = "vert_i2", exact_type_name=True )
vert_i2.parm("name").set("vert_i2")
vert_i2.parm("class").set(3)
vert_i2.parm("type").set(1)
vert_i2.parm("size").set(2)
vert_i2.parm("value1").setExpression("$VTX*0.1")
vert_i2.parm("value2").setExpression("$VTX*0.1")
vert_i2.setInput( 0, vert_i1 )
vert_i3 = geo.createNode( "attribcreate", node_name = "vert_i3", exact_type_name=True )
vert_i3.parm("name").set("vert_i3")
vert_i3.parm("class").set(3)
vert_i3.parm("type").set(1)
vert_i3.parm("size").set(3)
vert_i3.parm("value1").setExpression("$VTX*0.1")
vert_i3.parm("value2").setExpression("$VTX*0.1")
vert_i3.parm("value3").setExpression("$VTX*0.1")
vert_i3.setInput( 0, vert_i2 )
vert_v3f = geo.createNode( "attribcreate", node_name = "vert_v3f", exact_type_name=True )
vert_v3f.parm("name").set("vert_v3f")
vert_v3f.parm("class").set(3)
vert_v3f.parm("type").set(2)
vert_v3f.parm("value1").setExpression("$VTX*0.1")
vert_v3f.parm("value2").setExpression("$VTX*0.1")
vert_v3f.parm("value3").setExpression("$VTX*0.1")
vert_v3f.setInput( 0, vert_i3 )
vertString = geo.createNode( "attribcreate", node_name = "vertString", exact_type_name=True )
vertString.parm("name").set("vertString")
vertString.parm("class").set(3)
vertString.parm("type").set(3)
vertString.parm("string").setExpression("'string %06d!' % pwd().curPoint().number()", hou.exprLanguage.Python)
vertString.setInput( 0, vert_v3f )
vertString2 = geo.createNode( "attribcreate", node_name = "vertString2", exact_type_name=True )
vertString2.parm("name").set("vertString2")
vertString2.parm("class").set(3)
vertString2.parm("type").set(3)
vertString2.parm("string").setExpression("vals = [ 'd','c','e','a','g','f','b' ]\nreturn vals[ pwd().curPoint().number() % 7 ]", hou.exprLanguage.Python)
vertString2.setInput( 0, vertString )
vert_iList = geo.createNode( "attribwrangle", node_name = "vert_iList", exact_type_name=True )
vert_iList.parm("snippet").set("int i[];\ni[]@vert_iList = i;")
vert_iList.parm("class").set(3)
vert_iList.setInput( 0, vertString2 )
vert_fList = geo.createNode( "attribwrangle", node_name = "vert_fList", exact_type_name=True )
vert_fList.parm("snippet").set("float f[];\nf[]@vert_fList = f;")
vert_fList.parm("class").set(3)
vert_fList.setInput( 0, vert_iList )
detail_i3 = geo.createNode( "attribcreate", node_name = "detail_i3", exact_type_name=True )
detail_i3.parm("name").set("detail_i3")
detail_i3.parm("class").set(0)
detail_i3.parm("type").set(1)
detail_i3.parm("size").set(3)
detail_i3.parm("value1").set(123)
detail_i3.parm("value2").set(456.789) # can we catch it out with a float?
detail_i3.parm("value3").set(789)
detail_i3.setInput( 0, vert_fList )
detail_m33create = geo.createNode( "attribcreate", node_name = "detail_m33create", exact_type_name=True )
detail_m33create.parm("name").set("detail_m33")
detail_m33create.parm("class").set(0)
detail_m33create.parm("size").set(9)
detail_m33create.setInput( 0, detail_i3 )
detail_m33 = geo.createNode( "attribwrangle", node_name = "detail_m33", exact_type_name=True )
detail_m33.parm("snippet").set("3@detail_m33 = matrix3( maketransform(0,0,{ 10, 20, 30 },{ 30, 45, 60},{ 3, 4, 5 },{ 0, 0, 0 }) );")
detail_m33.parm("class").set(0)
detail_m33.setInput( 0, detail_m33create )
detail_m44create = geo.createNode( "attribcreate", node_name = "detail_m44create", exact_type_name=True )
detail_m44create.parm("name").set("detail_m44")
detail_m44create.parm("class").set(0)
detail_m44create.parm("size").set(16)
detail_m44create.setInput( 0, detail_m33 )
detail_m44 = geo.createNode( "attribwrangle", node_name = "detail_m44", exact_type_name=True )
detail_m44.parm("snippet").set("4@detail_m44 = maketransform(0,0,{ 10, 20, 30 },{ 30, 45, 60},{ 3, 4, 5 },{ 0, 0, 0 });")
detail_m44.parm("class").set(0)
detail_m44.setInput( 0, detail_m44create )
detail_iList = geo.createNode( "attribwrangle", node_name = "detail_iList", exact_type_name=True )
detail_iList.parm("snippet").set("int i[];\ni[]@detail_iList = i;")
detail_iList.parm("class").set(0)
detail_iList.setInput( 0, detail_m44 )
detail_fList = geo.createNode( "attribwrangle", node_name = "detail_fList", exact_type_name=True )
detail_fList.parm("snippet").set("float f[];\nf[]@detail_fList = f;")
detail_fList.parm("class").set(0)
detail_fList.setInput( 0, detail_iList )
out = geo.createNode( "null", node_name="OUT" )
out.setInput( 0, detail_fList )
# convert it all
converter = IECoreHoudini.FromHoudiniPointsConverter( out )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
self.assert_( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) )
bbox = result.bound()
self.assertEqual( bbox.min.x, -2.0 )
self.assertEqual( bbox.max.x, 2.0 )
self.assertEqual( result.numPoints, 100 )
for i in range( result.numPoints ) :
self.assert_( result["P"].data[i].x >= bbox.min.x )
self.assert_( result["P"].data[i].x <= bbox.max.x )
# integer and float list attributes are not currently supported, so should not appear in the primitive variable lists:
self.assertTrue( "vert_iList" not in result.keys() )
self.assertTrue( "vert_fList" not in result.keys() )
self.assertTrue( "detail_iList" not in result.keys() )
self.assertTrue( "detail_fList" not in result.keys() )
# test point attributes
self.assert_( "P" in result )
self.assertEqual( result['P'].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result['P'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result['P'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assert_( "N" in result )
self.assertEqual( result['N'].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result['N'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result['N'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
# test detail attributes
self.assert_( "detail_i3" in result )
self.assertEqual( result['detail_i3'].data.typeId(), IECore.TypeId.V3iData )
self.assertEqual( result['detail_i3'].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result['detail_i3'].data.value.x, 123 )
self.assertEqual( result['detail_i3'].data.value.y, 456 )
self.assertEqual( result['detail_i3'].data.value.z, 789 )
# test primitive attributes
self.assert_( "Cs" in result )
self.assertEqual( result["Cs"].data.typeId(), IECore.TypeId.Color3fVectorData )
self.assertEqual( result["Cs"].interpolation, IECore.PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( result["Cs"].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) )
for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) ) :
for j in range( 0, 3 ) :
self.assert_( result["Cs"].data[i][j] >= 0.0 )
self.assert_( result["Cs"].data[i][j] <= 1.0 )
# test vertex attributes
attrs = [ "vert_f1", "vert_f2", "vert_f3", "orient", "quat_2", "vert_i1", "vert_i2", "vert_i3", "vert_v3f", "vertStringIndices" ]
for a in attrs :
self.assert_( a in result )
self.assertEqual( result[a].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result[a].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["vert_f1"].data.typeId(), IECore.FloatVectorData.staticTypeId() )
self.assertEqual( result["vert_f2"].data.typeId(), IECore.V2fVectorData.staticTypeId() )
self.assertEqual( result["vert_f3"].data.typeId(), IECore.V3fVectorData.staticTypeId() )
self.assertEqual( result["orient"].data.typeId(), IECore.QuatfVectorData.staticTypeId() )
for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) :
for j in range( 0, 3 ) :
self.assert_( result["vert_f3"].data[i][j] >= 0.0 )
self.assert_( result["vert_f3"].data[i][j] < 400.1 )
self.assertAlmostEqual( result["orient"].data[i][0], i * 0.4,5 )
self.assertAlmostEqual( result["orient"].data[i][1], i * 0.1,5 )
self.assertAlmostEqual( result["orient"].data[i][2], i * 0.2,5 )
self.assertAlmostEqual( result["orient"].data[i][3], i * 0.3,5 )
self.assertAlmostEqual( result["quat_2"].data[i][0], i * 0.8,5 )
self.assertAlmostEqual( result["quat_2"].data[i][1], i * 0.2,5 )
self.assertAlmostEqual( result["quat_2"].data[i][2], i * 0.4,5 )
self.assertAlmostEqual( result["quat_2"].data[i][3], i * 0.6,5 )
self.assertEqual( result["vert_i1"].data.typeId(), IECore.IntVectorData.staticTypeId() )
self.assertEqual( result["vert_i2"].data.typeId(), IECore.V2iVectorData.staticTypeId() )
self.assertEqual( result["vert_i3"].data.typeId(), IECore.V3iVectorData.staticTypeId() )
for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) :
for j in range( 0, 3 ) :
self.assert_( result["vert_i3"].data[i][j] < 10 )
self.assertEqual( result["vert_v3f"].data.typeId(), IECore.V3fVectorData.staticTypeId() )
self.assertEqual( result["vertString"].data.typeId(), IECore.TypeId.StringVectorData )
self.assertEqual( result["vertString"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result["vertStringIndices"].data.typeId(), IECore.TypeId.IntVectorData )
for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) :
self.assertEqual( result["vertString"].data[i], "string %06d!" % i )
self.assertEqual( result["vertStringIndices"].data[i], i )
# make sure the string tables are alphabetically sorted:
self.assertEqual( result["vertString2"].data, IECore.StringVectorData( ['a','b','c','d','e','f','g'] ) )
stringVals = [ 'd','c','e','a','g','f','b' ]
for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) :
self.assertEqual( result["vertString2"].data[ result["vertString2Indices"].data[i] ], stringVals[ i % 7 ] )
self.assertEqual( result["m44"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result["m44"].data.typeId(), IECore.M44fVectorData.staticTypeId() )
matrixScale = IECore.M44f.extractSHRT( result["m44"].data[0] )[0]
matrixRot = IECore.M44f.extractSHRT( result["m44"].data[0] )[2]
matrixTranslation = IECore.M44f.extractSHRT( result["m44"].data[0] )[3]
self.assertEqual( matrixTranslation, IECore.V3f( 10,20,30 ) )
self.assertTrue( matrixRot.equalWithRelError( IECore.V3f( math.pi / 6, math.pi / 4, math.pi / 3 ), 1.e-5 ) )
self.assertTrue( matrixScale.equalWithRelError( IECore.V3f( 3, 4, 5 ), 1.e-5 ) )
self.assertEqual( result["detail_m44"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result["detail_m44"].data.typeId(), IECore.M44fData.staticTypeId() )
matrixScale = IECore.M44f.extractSHRT( result["detail_m44"].data.value )[0]
matrixRot = IECore.M44f.extractSHRT( result["detail_m44"].data.value )[2]
matrixTranslation = IECore.M44f.extractSHRT( result["detail_m44"].data.value )[3]
self.assertEqual( matrixTranslation, IECore.V3f( 10,20,30 ) )
self.assertTrue( matrixRot.equalWithRelError( IECore.V3f( math.pi / 6, math.pi / 4, math.pi / 3 ), 1.e-5 ) )
self.assertTrue( matrixScale.equalWithRelError( IECore.V3f( 3, 4, 5 ), 1.e-5 ) )
self.assertEqual( result["m33"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result["m33"].data.typeId(), IECore.M33fVectorData.staticTypeId() )
m3 = result["m33"].data[0]
m4 = IECore.M44f(
m3[(0,0)], m3[(0,1)], m3[(0,2)], 0.0,
m3[(1,0)], m3[(1,1)], m3[(1,2)], 0.0,
m3[(2,0)], m3[(2,1)], m3[(2,2)], 0.0,
0.0, 0.0, 0.0, 1.0
)
matrixScale = IECore.M44f.extractSHRT( m4 )[0]
matrixRot = IECore.M44f.extractSHRT( m4 )[2]
self.assertTrue( matrixRot.equalWithRelError( IECore.V3f( math.pi / 6, math.pi / 4, math.pi / 3 ), 1.e-5 ) )
self.assertTrue( matrixScale.equalWithRelError( IECore.V3f( 3, 4, 5 ), 1.e-5 ) )
self.assertEqual( result["detail_m33"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result["detail_m33"].data.typeId(), IECore.M33fData.staticTypeId() )
m3 = result["detail_m33"].data.value
m4 = IECore.M44f(
m3[(0,0)], m3[(0,1)], m3[(0,2)], 0.0,
m3[(1,0)], m3[(1,1)], m3[(1,2)], 0.0,
m3[(2,0)], m3[(2,1)], m3[(2,2)], 0.0,
0.0, 0.0, 0.0, 1.0
)
matrixScale = IECore.M44f.extractSHRT( m4 )[0]
matrixRot = IECore.M44f.extractSHRT( m4 )[2]
self.assertTrue( matrixRot.equalWithRelError( IECore.V3f( math.pi / 6, math.pi / 4, math.pi / 3 ), 1.e-5 ) )
self.assertTrue( matrixScale.equalWithRelError( IECore.V3f( 3, 4, 5 ), 1.e-5 ) )
self.assert_( result.arePrimitiveVariablesValid() )
# convert some points
def testConvertPoints( self ) :
points = self.createPoints()
converter = IECoreHoudini.FromHoudiniPointsConverter( points )
result = converter.convert()
self.assertEqual( result.typeId(), IECore.PointsPrimitive.staticTypeId() )
self.assertEqual( points.parm('npts').eval(), result.numPoints )
self.assert_( "P" in result.keys() )
self.assert_( "N" in result.keys() )
self.assert_( result.arePrimitiveVariablesValid() )
# simple attribute conversion
def testSetupAttributes( self ) :
points = self.createPoints()
geo = points.parent()
attr = geo.createNode( "attribcreate", exact_type_name=True )
attr.setInput( 0, points )
attr.parm("name").set( "test_attribute" )
attr.parm("type").set(0) # float
attr.parm("size").set(1) # 1 element
attr.parm("value1").set(123.456)
attr.parm("value2").set(654.321)
converter = IECoreHoudini.FromHoudiniPointsConverter( attr )
result = converter.convert()
self.assert_( "test_attribute" in result.keys() )
self.assertEqual( result["test_attribute"].data.size(), points.parm('npts').eval() )
self.assert_( result.arePrimitiveVariablesValid() )
return attr
# testing point attributes and types
def testPointAttributes( self ) :
attr = self.testSetupAttributes()
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.FloatVectorData )
self.assert_( result["test_attribute"].data[0] > 123.0 )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # integer
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.IntVectorData )
self.assertEqual( result["test_attribute"].data[0], 123 )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(0) # float
attr.parm("size").set(2) # 2 elementS
attr.parm("value2").set(456.789)
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2fVectorData )
self.assertEqual( result["test_attribute"].data[0], IECore.V2f( 123.456, 456.789 ) )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # int
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2iVectorData )
self.assertEqual( result["test_attribute"].data[0], IECore.V2i( 123, 456 ) )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(0) # float
attr.parm("size").set(3) # 3 elements
attr.parm("value3").set(999.999)
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result["test_attribute"].data[0],IECore.V3f( 123.456, 456.789, 999.999 ) )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # int
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3iVectorData )
self.assertEqual( result["test_attribute"].data[0], IECore.V3i( 123, 456, 999 ) )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set( 3 ) # string
attr.parm( "string" ).setExpression("'string %06d!' % pwd().curPoint().number()", hou.exprLanguage.Python)
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.StringVectorData )
self.assertEqual( result["test_attribute"].data[10], "string 000010!" )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result["test_attributeIndices"].data.typeId(), IECore.TypeId.IntVectorData )
self.assertEqual( result["test_attributeIndices"].data[10], 10 )
self.assertEqual( result["test_attributeIndices"].data.size(), 5000 )
self.assertEqual( result["test_attributeIndices"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
# testing detail attributes and types
def testDetailAttributes( self ) :
attr = self.testSetupAttributes()
attr.parm("class").set(0) # detail attribute
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
attr.parm("value1").set(123.456)
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.FloatData )
self.assert_( result["test_attribute"].data > IECore.FloatData( 123.0 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # integer
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.IntData )
self.assertEqual( result["test_attribute"].data, IECore.IntData( 123 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(0) # float
attr.parm("size").set(2) # 2 elementS
attr.parm("value2").set(456.789)
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2fData )
self.assertEqual( result["test_attribute"].data.value, IECore.V2f( 123.456, 456.789 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # int
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2iData )
self.assertEqual( result["test_attribute"].data.value, IECore.V2i( 123, 456 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(0) # float
attr.parm("size").set(3) # 3 elements
attr.parm("value3").set(999.999)
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3fData )
self.assertEqual( result["test_attribute"].data.value, IECore.V3f( 123.456, 456.789, 999.999 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # int
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3iData )
self.assertEqual( result["test_attribute"].data.value, IECore.V3i( 123, 456, 999 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set( 3 ) # string
attr.parm( "string" ).set( "string!" )
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.StringData )
self.assertEqual( result["test_attribute"].data.value, "string!" )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
# testing that float[4] doesn't work!
def testFloat4attr( self ) : # we can't deal with float 4's right now
attr = self.testSetupAttributes()
attr.parm("name").set( "test_attribute" )
attr.parm("size").set(4) # 4 elements per point-attribute
converter = IECoreHoudini.FromHoudiniPointsConverter( attr )
result = converter.convert()
self.assert_( "test_attribute" not in result.keys() ) # invalid due to being float[4]
self.assert_( result.arePrimitiveVariablesValid() )
# testing conversion of animating geometry
def testAnimatingGeometry( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
torus = geo.createNode( "torus" )
facet = geo.createNode( "facet" )
facet.parm("postnml").set(True)
mountain = geo.createNode( "mountain" )
mountain.parm("offset1").setExpression( "$FF" )
points = geo.createNode( "scatter" )
facet.setInput( 0, torus )
mountain.setInput( 0, facet )
points.setInput( 0, mountain )
converter = IECoreHoudini.FromHoudiniPointsConverter( points )
hou.setFrame(1)
points_1 = converter.convert()
hou.setFrame(2)
converter = IECoreHoudini.FromHoudiniPointsConverter( points )
points_2 = converter.convert()
self.assertNotEqual( points_1["P"].data, points_2["P"].data )
# testing we can handle an object being deleted
def testObjectWasDeleted( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
torus = geo.createNode( "torus" )
converter = IECoreHoudini.FromHoudiniPointsConverter( torus )
g1 = converter.convert()
torus.destroy()
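		# Converting again after the node has been destroyed should return the same geometry as before.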
g2 = converter.convert()
self.assertEqual( g2, g1 )
self.assertRaises( RuntimeError, IECore.curry( IECoreHoudini.FromHoudiniPointsConverter, torus ) )
# testing we can handle an object being deleted
def testObjectWasDeletedFactory( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
torus = geo.createNode( "torus" )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( torus )
g1 = converter.convert()
torus.destroy()
g2 = converter.convert()
self.assertEqual( g2, g1 )
self.assertRaises( RuntimeError, IECore.curry( IECoreHoudini.FromHoudiniGeometryConverter.create, torus ) )
# testing converting a Houdini particle primitive with detail and point attribs
def testParticlePrimitive( self ) :
obj = hou.node("/obj")
geo = obj.createNode( "geo", run_init_scripts=False )
popnet = geo.createNode( "popnet" )
location = popnet.createNode( "location" )
detailAttr = popnet.createOutputNode( "attribcreate", exact_type_name=True )
detailAttr.parm("name").set( "float3detail" )
detailAttr.parm("class").set( 0 ) # detail
detailAttr.parm("type").set( 0 ) # float
detailAttr.parm("size").set( 3 ) # 3 elements
detailAttr.parm("value1").set( 1 )
detailAttr.parm("value2").set( 2 )
detailAttr.parm("value3").set( 3 )
pointAttr = detailAttr.createOutputNode( "attribcreate", exact_type_name=True )
pointAttr.parm("name").set( "float3point" )
pointAttr.parm("class").set( 2 ) # point
pointAttr.parm("type").set( 0 ) # float
pointAttr.parm("size").set( 3 ) # 3 elements
pointAttr.parm("value1").set( 1 )
pointAttr.parm("value2").set( 2 )
pointAttr.parm("value3").set( 3 )
hou.setFrame( 5 )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( pointAttr )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
points = converter.convert()
self.assertEqual( type(points), IECore.PointsPrimitive )
self.assertEqual( points.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 21 )
self.assertEqual( points["float3detail"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( type(points["float3detail"].data), IECore.V3fData )
self.assert_( points["float3detail"].data.value.equalWithRelError( IECore.V3f( 1, 2, 3 ), 1e-10 ) )
self.assertEqual( type(points["float3point"].data), IECore.V3fVectorData )
self.assertEqual( points["float3point"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
for p in points["float3point"].data :
self.assert_( p.equalWithRelError( IECore.V3f( 1, 2, 3 ), 1e-10 ) )
self.assert_( points.arePrimitiveVariablesValid() )
add = pointAttr.createOutputNode( "add" )
add.parm( "keep" ).set( 1 ) # deletes primitive and leaves points
converter = IECoreHoudini.FromHoudiniPointsConverter( add )
points2 = converter.convert()
del points['generator']
del points['generatorIndices']
del points['born']
del points['source']
self.assertEqual( points2, points )
def testMultipleParticlePrimitives( self ) :
obj = hou.node("/obj")
geo = obj.createNode( "geo", run_init_scripts=False )
popnet = geo.createNode( "popnet" )
fireworks = popnet.createNode( "fireworks" )
hou.setFrame( 15 )
converter = IECoreHoudini.FromHoudiniPointsConverter( popnet )
points = converter.convert()
self.assertEqual( type(points), IECore.PointsPrimitive )
self.assertEqual( points.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 24 )
self.assertEqual( points["accel"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( type(points["accel"].data), IECore.V3fVectorData )
self.assertEqual( points["accel"].data.getInterpretation(), IECore.GeometricData.Interpretation.Vector )
self.assertEqual( points["nextid"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( points["nextid"].data, IECore.IntData( 25 ) )
self.assertTrue( points.arePrimitiveVariablesValid() )
add = popnet.createOutputNode( "add" )
add.parm( "keep" ).set( 1 ) # deletes primitive and leaves points
converter = IECoreHoudini.FromHoudiniPointsConverter( add )
points2 = converter.convert()
# showing that prim attribs don't get converted because the interpolation size doesn't match
self.assertEqual( points2, points )
def testName( self ) :
points = self.createPoints()
particles = points.createOutputNode( "add" )
particles.parm( "addparticlesystem" ).set( True )
name = particles.createOutputNode( "name" )
name.parm( "name1" ).set( "points" )
box = points.parent().createNode( "box" )
name2 = box.createOutputNode( "name" )
name2.parm( "name1" ).set( "box" )
merge = name.createOutputNode( "merge" )
merge.setInput( 1, name2 )
converter = IECoreHoudini.FromHoudiniPointsConverter( merge )
result = converter.convert()
# names are not stored on the object at all
self.assertEqual( result.blindData(), IECore.CompoundData() )
self.assertFalse( "name" in result )
self.assertFalse( "nameIndices" in result )
# both shapes were converted as one PointsPrimitive
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 5008 )
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 1 )
self.assertTrue( result.arePrimitiveVariablesValid() )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( merge, "points" )
self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
# names are not stored on the object at all
self.assertEqual( result.blindData(), IECore.CompoundData() )
self.assertFalse( "name" in result )
self.assertFalse( "nameIndices" in result )
# only the named points were converted
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 5000 )
self.assertTrue( result.arePrimitiveVariablesValid() )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( merge, "box", IECore.TypeId.PointsPrimitive )
self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
# names are not stored on the object at all
self.assertEqual( result.blindData(), IECore.CompoundData() )
self.assertFalse( "name" in result )
self.assertFalse( "nameIndices" in result )
# only the named points were converted
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 8 )
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 1 )
self.assertTrue( result.arePrimitiveVariablesValid() )
def testAttributeFilter( self ) :
points = self.createPoints()
particles = points.createOutputNode( "add" )
particles.parm( "addparticlesystem" ).set( True )
# add vertex normals
facet = particles.createOutputNode( "facet", node_name = "add_point_normals" )
facet.parm("postnml").set(True)
# add a primitive colour attributes
primcol = facet.createOutputNode( "primitive", node_name = "prim_colour" )
primcol.parm("doclr").set(1)
primcol.parm("diffr").setExpression("rand($PR)")
primcol.parm("diffg").setExpression("rand($PR+1)")
primcol.parm("diffb").setExpression("rand($PR+2)")
detail = primcol.createOutputNode( "attribcreate", node_name = "detail", exact_type_name=True )
detail.parm("name").set("detailAttr")
detail.parm("class").set(0)
detail.parm("type").set(1)
detail.parm("size").set(3)
detail.parm("value1").set(123)
detail.parm("value2").set(456.789) # can we catch it out with a float?
detail.parm("value3").set(789)
converter = IECoreHoudini.FromHoudiniPointsConverter( detail )
self.assertEqual( sorted(converter.convert().keys()), [ "Cs", "N", "P", "detailAttr", "varmap" ] )
converter.parameters()["attributeFilter"].setTypedValue( "P" )
self.assertEqual( sorted(converter.convert().keys()), [ "P" ] )
converter.parameters()["attributeFilter"].setTypedValue( "* ^N ^varmap" )
self.assertEqual( sorted(converter.convert().keys()), [ "Cs", "P", "detailAttr" ] )
# P must be converted
converter.parameters()["attributeFilter"].setTypedValue( "* ^P" )
self.assertTrue( "P" in converter.convert().keys() )
def testStandardAttributeConversion( self ) :
points = self.createPoints()
color = points.createOutputNode( "color" )
color.parm( "colortype" ).set( 2 )
rest = color.createOutputNode( "rest" )
scale = rest.createOutputNode( "attribcreate" )
scale.parm( "name1" ).set( "pscale" )
scale.parm( "value1v1" ).setExpression( "$PT" )
converter = IECoreHoudini.FromHoudiniPointsConverter( scale )
result = converter.convert()
if hou.applicationVersion()[0] >= 15 :
self.assertEqual( result.keys(), [ "Cs", "N", "P", "Pref", "width" ] )
else :
self.assertEqual( result.keys(), [ "Cs", "N", "P", "Pref", "varmap", "width" ] )
self.assertTrue( result.arePrimitiveVariablesValid() )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["Pref"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
converter["convertStandardAttributes"].setTypedValue( False )
result = converter.convert()
if hou.applicationVersion()[0] >= 15 :
self.assertEqual( result.keys(), [ "Cd", "N", "P", "pscale", "rest" ] )
else :
self.assertEqual( result.keys(), [ "Cd", "N", "P", "pscale", "rest", "varmap" ] )
self.assertTrue( result.arePrimitiveVariablesValid() )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["rest"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
| 3,866,630,561,449,466,000
| 46.919182
| 155
| 0.708268
| false
| 3.047433
| true
| false
| false
|
bung87/django-moe-auth
|
setup.py
|
1
|
1126
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import codecs
# import os
from distutils.core import setup
from setuptools import find_packages
# version_tuple = __import__('django_js_reverse').VERSION
# version = '.'.join([str(v) for v in version_tuple])
setup(
name='django-moe-auth',
version='0.0.1',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: PyPy',
'Framework :: Django',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
],
license='MIT',
description='Javascript url handling for Django that doesn\'t hurt.',
# long_description=read('README.rst'),
author='Bung',
author_email='zh.bung@gmail.com',
url='https://github.com/bung87/django-moe-auth',
download_url='https://github.com/bung87/django-moe-auth',
packages=find_packages(),
install_requires=[
'Django>=1.5',
'mongoengine==0.8.8',
'djangorestframework==3.0.5',
'django-allauth>=0.19.1'
]
)
|
mit
| -3,784,006,114,095,165,400
| 29.432432
| 73
| 0.618117
| false
| 3.597444
| false
| false
| false
|
revdotcom/babelsubs
|
babelsubs/utils.py
|
2
|
7601
|
import re
import bleach
import htmllib
import htmlentitydefs
import formatter
from itertools import chain
from xmlconst import *
DEFAULT_ALLOWED_TAGS = ['i', 'b', 'u']
MULTIPLE_SPACES = re.compile('\s{2,}')
BLANK_CHARS = re.compile('[\n\t\r]*')
# We support unsyced subs, meaning there is not timing data for them
# in which case we flag them with the largest possible time value
UNSYNCED_TIME_FULL = (60 * 60 * 100 * 1000) - 1
# some formats limit hours to 1 digit, so the max available time must
# be adjusted
UNSYNCED_TIME_ONE_HOUR_DIGIT = (60 * 60 * 10 * 1000) - 1000
def unescape_html(s):
p = htmllib.HTMLParser(formatter.NullFormatter() )
# we need to preserve line breaks, nofill makes sure we don't
# loose them
p.nofill = True
p.save_bgn()
p.feed(s)
return p.save_end().strip()
LANG_DIALECT_RE = re.compile(r'(?P<lang_code>[\w]{2,13})(?P<dialect>-[\w]{2,8})?(?P<rest>-[\w]*)?')
def to_bcp47(code):
"""
This is an ugly hack. I should be ashamed, but I'm not.
Implementing BCP47 will be much more work.
The idea is to translate from a lang code unilangs supports
into the bpc47 format. There are cases where this might fail
(as if the dialect code is not recognized by bcp47). For most cases this should be ok.
Simple sanity chech:
assert (unilangs.to_bcp47("en-us"), unilangs.to_bcp47('en'), unilangs.to_bcp47('ug_Arab-cn')) == ('en-US', 'en', 'ug_Arab-CN'
)
"""
if not code:
raise ValueError("No language was passed")
match = LANG_DIALECT_RE.match(code)
if not match:
raise ValueError("%s code does not seem to be a valid language code.")
match_dict = match.groupdict()
return "%s%s%s" % (match_dict['lang_code'],
(match_dict.get('dialect', "") or "").upper(),
match_dict.get('rest', '') or "")
def generate_style_map(dom):
'''
Parse the head.styling node on the xml and generate a hash -> list
of styles that require our supported formatting optins (bold and
italic for now).
eg.
style_map = {
'italic': ['speaker', 'importante'],
'bold': [],
}
This will be used when parsing each text node to make sure
we can convert to our own styling markers.
'''
style_map = {
'italic': [],
'bold': [],
}
styling_nodes = dom.getElementsByTagName("styling")
style_nodes = chain.from_iterable([x.getElementsByTagName('style') for x in styling_nodes])
for style_node in style_nodes:
style_id = style_node.getAttribute('xml:id')
for key in style_node.attributes.keys():
value = style_node.attributes[key].value
if key == 'tts:fontWeight' and value == 'bold':
style_map['bold'].append(style_id)
elif key == 'tts:fontStyle' and value == 'italic':
style_map['italic'].append(style_id)
return style_map
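# A minimal sketch of how generate_style_map can be exercised with
# xml.dom.minidom; the TTML fragment below is illustrative only.
def _generate_style_map_example():
    from xml.dom import minidom
    ttml = (
        '<tt xmlns:tts="http://www.w3.org/ns/ttml#styling"><head><styling>'
        '<style xml:id="speaker" tts:fontStyle="italic"/>'
        '<style xml:id="title" tts:fontWeight="bold"/>'
        '</styling></head></tt>'
    )
    dom = minidom.parseString(ttml)
    # Expected result: {'italic': ['speaker'], 'bold': ['title']}
    return generate_style_map(dom)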
def strip_tags(text, tags=None):
"""
Returns text with the tags stripped.
By default we allow the standard formatting tags
to pass (i,b,u).
Any other tag's content will be present, but with tags removed.
"""
if tags is None:
tags = DEFAULT_ALLOWED_TAGS
return bleach.clean(text, tags=tags, strip=True)
def escape_ampersands(text):
"""Take a string of chars and replace ampersands with &"""
return text.replace('&', '&')
def entities_to_chars(text):
"""Removes HTML or XML character references and entities from a text string.
http://effbot.org/zone/re-sub.htm#unescape-html
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
def from_xmlish_text(input_str):
"""
Parses text content from xml based formats.
<br> tags are transformed into newlines, tab and multiple spaces
collapsed. e.g. turns:
"\n\r foo <br/> bar foorer \t " -> "foo bar\nfoorer"
"""
if not input_str:
return u""
# remove new lines and tabs
input_str = BLANK_CHARS.sub(u"", input_str)
# do convert <br> to new lines
input_str = input_str.replace("<br/>", "\n")
# collapse whitespace on each new line
return "\n".join( MULTIPLE_SPACES.sub(u" ", x).strip() for x in input_str.split('\n'))
def unsynced_time_components(one_hour_digit=False, uses_centiseconds=False):
return {
'hours': 9 if one_hour_digit else 99,
'minutes': 59,
'seconds': 59,
'milliseconds': 99 if uses_centiseconds else 999,
'centiseconds': 99,
}
def milliseconds_to_time_clock_components(milliseconds,
unsynced_val=UNSYNCED_TIME_FULL,
use_centiseconds=False):
"""Convert milliseconds to a dict of hours, minutes, seconds, milliseconds.
Milliseconds should be given as an integer, or None. None will be converted
to all zeros.
If use_centiseconds is True, the resulting dict will have a centiseconds
entry instead of a milliseconds one.
"""
components = dict(hours=0, minutes=0, seconds=0, milliseconds=0)
if milliseconds is not None:
components['seconds'], components['milliseconds'] = divmod(int(milliseconds), 1000)
components['minutes'], components['seconds'] = divmod(components['seconds'], 60 )
components['hours'], components['minutes'] = divmod(components['minutes'], 60 )
if use_centiseconds:
ms = components.pop('milliseconds')
components['centiseconds'] = round(ms / 10.0)
return components
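# Worked example (illustrative): 3723004 ms is 1h 2m 3s 4ms, so the call
# below returns {'hours': 1, 'minutes': 2, 'seconds': 3, 'milliseconds': 4};
# with use_centiseconds=True the milliseconds entry is replaced by
# 'centiseconds'.
def _time_components_example():
    return milliseconds_to_time_clock_components(3723004)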
def fraction_to_milliseconds(str_milli):
"""
Converts the fractional part of a timestamp, given as a string, into
integer milliseconds by right-padding to three digits, e.g.
'1' -> 100
'04' -> 40
'123' -> 123
"""
if not str_milli:
return 0
return int(str_milli.ljust(3, '0')) % 1000
def centiseconds_to_milliseconds(centi):
return int(centi) * 10 if centi else 0
def indent_ttml(tt_elt, indent_width=4):
"""Indent TTML tree
This function walks the XML tree and adjusts the text and tail attributes
so that the output will be nicely indented. It skips <p> elements and
their children, since whitespace is significant there.
Also, we will add a newline after the closing tag for the TT element.
:param tt_elt: etree TT root element.
"""
_do_indent_ttml(tt_elt, " " * indent_width, 0)
tt_elt.tail = "\n"
def _do_indent_ttml(elt, indent, indent_level):
if elt.tag == TTML + 'p' or len(elt) == 0:
return
children = list(elt)
# before a child element, we want to start a new line, then indent enough
# to move them to the next indentation level
pre_child_indent = "\n" + indent * (indent_level + 1)
elt.text = pre_child_indent
for child in children[:-1]:
child.tail = pre_child_indent
# after the last child, we need to position our closing tag. This means
# indenting enough to move it to our indentation level.
children[-1].tail = "\n" + indent * indent_level
for child in children:
_do_indent_ttml(child, indent, indent_level + 1)
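# A small sketch of indent_ttml in action, built with lxml.etree; it assumes
# the TTML constant imported from xmlconst is the usual
# '{http://www.w3.org/ns/ttml}' namespace prefix.
def _indent_ttml_example():
    from lxml import etree
    tt = etree.fromstring(
        '<tt xmlns="http://www.w3.org/ns/ttml">'
        '<body><div><p>keep my   spacing</p></div></body></tt>')
    indent_ttml(tt, indent_width=2)
    # <body> and <div> now sit on indented lines of their own, while the
    # whitespace inside <p> is left untouched.
    return etree.tostring(tt)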
|
bsd-3-clause
| 4,482,841,142,669,328,000
| 33.393665
| 129
| 0.616629
| false
| 3.621248
| false
| false
| false
|
markfasheh/ocfs2-tools
|
ocfs2console/ocfs2interface/mount.py
|
1
|
5115
|
# OCFS2Console - GUI frontend for OCFS2 management and debugging
# Copyright (C) 2002, 2005 Oracle. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import gtk
import ocfs2
from guiutil import set_props, error_box
from fstab import FSTab
from process import Process
def mount(parent, device):
mountpoint, options = query_mount(parent, device)
if not mountpoint:
return None
command = ('mount', '-t', 'ocfs2', device, mountpoint)
if options:
command = list(command)
command[1:1] = ('-o', options)
p = Process(command, 'Mount', 'Mounting...', parent, spin_now=True)
success, output, killed = p.reap()
if not success:
if killed:
error_box(parent, 'mount died unexpectedly! Your system is '
'probably in an inconsistent state. You '
'should reboot at the earliest opportunity')
else:
error_box(parent, '%s: Could not mount %s' % (output, device))
return None
else:
return mountpoint
def unmount(parent, device, mountpoint):
command = ('umount', mountpoint)
p = Process(command, 'Unmount', 'Unmounting...', parent)
success, output, killed = p.reap()
if not success:
if killed:
error_box(parent, 'umount died unexpectedly! Your system is '
'probably in an inconsistent state. You '
'should reboot at the earliest opportunity')
else:
error_box(parent, '%s: Could not unmount %s mounted on %s' %
(output, device, mountpoint))
def query_mount(parent, device):
default_mountpoint, default_options = get_defaults(device)
dialog = gtk.Dialog(parent=parent,
flags=gtk.DIALOG_DESTROY_WITH_PARENT,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
table = gtk.Table(rows=2, columns=2)
set_props(table, row_spacing=6,
column_spacing=6,
border_width=6,
parent=dialog.vbox)
def text_changed(entry):
text = entry.get_text()
valid = len(text) > 1 and text.startswith('/')
dialog.set_response_sensitive(gtk.RESPONSE_OK, valid)
mountpoint = gtk.Entry()
mountpoint.connect('changed', text_changed)
mountpoint.set_text(default_mountpoint)
text_changed(mountpoint)
options = gtk.Entry()
options.set_text(default_options)
row = 0
for prompt, entry in (('_Mountpoint', mountpoint),
('O_ptions', options)):
label = gtk.Label()
label.set_text_with_mnemonic(prompt + ':')
set_props(label, xalign=0.0)
table.attach(label, 0, 1, row, row + 1)
entry.set_activates_default(True)
label.set_mnemonic_widget(entry)
table.attach(entry, 1, 2, row, row + 1)
row = row + 1
dialog.show_all()
if dialog.run() == gtk.RESPONSE_OK:
mount_params = mountpoint.get_text(), options.get_text()
else:
mount_params = None, None
dialog.destroy()
return mount_params
def get_defaults(device):
label, uuid = get_ocfs2_id(device)
fstab = FSTab()
entry = fstab.get(device=device, label=label, uuid=uuid)
if entry and entry.vfstype == 'ocfs2':
return entry.mountpoint, entry.options
else:
return '', ''
def get_ocfs2_id(device):
try:
fs = ocfs2.Filesystem(device)
super = fs.fs_super
label = super.s_label
uuid = super.uuid_unparsed
except ocfs2.error:
label = uuid = None
return (label, uuid)
def main():
import sys
device = sys.argv[1]
def dummy(*args):
gtk.main_quit()
window = gtk.Window()
window.connect('delete-event', dummy)
vbbox = gtk.VButtonBox()
window.add(vbbox)
window.mountpoint = None
def test_mount(b):
window.mountpoint = mount(window, device)
button = gtk.Button('Mount')
button.connect('clicked', test_mount)
vbbox.add(button)
def test_unmount(b):
unmount(window, device, window.mountpoint)
button = gtk.Button('Unmount')
button.connect('clicked', test_unmount)
vbbox.add(button)
window.show_all()
gtk.main()
if __name__ == '__main__':
main()
|
gpl-2.0
| 519,026,439,820,794,300
| 27.416667
| 76
| 0.608798
| false
| 3.825729
| false
| false
| false
|
ProjectQ-Framework/FermiLib
|
src/fermilib/utils/_trotter_error_test.py
|
1
|
6490
|
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for _trotter_error.py."""
from future.utils import iteritems
from math import sqrt
import numpy
from scipy.linalg import expm
import unittest
from fermilib.config import *
from fermilib.ops import normal_ordered
from fermilib.transforms import get_sparse_operator
from fermilib.utils import MolecularData
from fermilib.utils._trotter_error import *
from projectq.ops import QubitOperator
class CommutatorTest(unittest.TestCase):
def test_commutator_commutes(self):
zero = QubitOperator()
self.assertTrue(commutator(QubitOperator(()),
QubitOperator('X3')).isclose(zero))
def test_commutator_single_pauli(self):
com = commutator(QubitOperator('X3'),
QubitOperator('Y3'))
expected = 2j * QubitOperator('Z3')
self.assertTrue(expected.isclose(com))
def test_commutator_multi_pauli(self):
com = commutator(QubitOperator('Z1 X2 Y4'),
QubitOperator('X1 Z2 X4'))
expected = -2j * QubitOperator('Y1 Y2 Z4')
self.assertTrue(expected.isclose(com))
class TriviallyCommutesTest(unittest.TestCase):
def test_trivially_commutes_id_id(self):
self.assertTrue(trivially_commutes(
QubitOperator(()), 3 * QubitOperator(())))
def test_trivially_commutes_id_x(self):
self.assertTrue(trivially_commutes(
QubitOperator(()), QubitOperator('X1')))
def test_trivially_commutes_id_xx(self):
self.assertTrue(trivially_commutes(
QubitOperator(()), QubitOperator('X1 X3')))
def test_trivially_commutes_nonid_with_id(self):
self.assertTrue(trivially_commutes(
QubitOperator('X1 Z5 Y9 Z11'), QubitOperator(())))
def test_trivially_commutes_no_intersect(self):
self.assertTrue(trivially_commutes(
QubitOperator('X1 Y3 Z6'), QubitOperator('Z0 Z2 X4 Y5')))
def test_trivially_commutes_allsame_oddintersect(self):
self.assertTrue(trivially_commutes(
QubitOperator('X1 X3 X4 Z6 X8'), QubitOperator('X1 X3 X4 Z7 Y9')))
def test_trivially_commutes_even_anti(self):
self.assertTrue(trivially_commutes(
QubitOperator('X1 Z2 Z3 X10'), QubitOperator('Y1 X2 Z3 Y9')))
def test_no_trivial_commute_odd_anti(self):
self.assertFalse(trivially_commutes(
QubitOperator('X1'), QubitOperator('Y1')))
def test_no_trivial_commute_triple_anti_intersect(self):
self.assertFalse(trivially_commutes(
QubitOperator('X0 Z2 Z4 Z9 Y17'),
QubitOperator('Y0 X2 Y4 Z9 Z16')))
def test_no_trivial_commute_mostly_commuting(self):
self.assertFalse(trivially_commutes(
QubitOperator('X0 Y1 Z2 X4 Y5 Y6'),
QubitOperator('X0 Y1 Z2 X4 Z5 Y6')))
class TriviallyDoubleCommutesTest(unittest.TestCase):
def test_trivial_double_commute_no_intersect(self):
self.assertTrue(trivially_double_commutes(
QubitOperator('X1 Z2 Y4'), QubitOperator('Y0 X3 Z6'),
QubitOperator('Y5')))
def test_trivial_double_commute_no_intersect_a_bc(self):
self.assertTrue(trivially_double_commutes(
QubitOperator('X1 Z2 Y4'), QubitOperator('Y0 X3 Z6'),
QubitOperator('Z3 Y5')))
def test_trivial_double_commute_bc_intersect_commute(self):
self.assertTrue(trivially_double_commutes(
QubitOperator('X1 Z2 Y4'), QubitOperator('X0 Z3'),
QubitOperator('Y0 X3')))
class ErrorOperatorTest(unittest.TestCase):
def test_error_operator_bad_order(self):
with self.assertRaises(NotImplementedError):
error_operator([QubitOperator], 1)
def test_error_operator_all_diagonal(self):
terms = [QubitOperator(()), QubitOperator('Z0 Z1 Z2'),
QubitOperator('Z0 Z3'), QubitOperator('Z0 Z1 Z2 Z3')]
zero = QubitOperator()
self.assertTrue(zero.isclose(error_operator(terms)))
class ErrorBoundTest(unittest.TestCase):
def test_error_bound_xyz_tight(self):
terms = [QubitOperator('X1'), QubitOperator('Y1'), QubitOperator('Z1')]
expected = sqrt(7. / 12) # 2-norm of [[-2/3, 1/3+i/6], [1/3-i/6, 2/3]]
self.assertLess(expected, error_bound(terms, tight=True))
def test_error_bound_xyz_loose(self):
terms = [QubitOperator('X1'), QubitOperator('Y1'), QubitOperator('Z1')]
self.assertTrue(numpy.isclose(
error_bound(terms, tight=False), 4. * (2 ** 2 + 1 ** 2)))
def test_error_operator_xyz(self):
terms = [QubitOperator('X1'), QubitOperator('Y1'), QubitOperator('Z1')]
expected = numpy.array([[-2./3, 1./3 + 1.j/6, 0., 0.],
[1./3 - 1.j/6, 2./3, 0., 0.],
[0., 0., -2./3, 1./3 + 1.j/6],
[0., 0., 1./3 - 1.j/6, 2./3]])
sparse_op = get_sparse_operator(error_operator(terms))
matrix = sparse_op.todense()
self.assertTrue(numpy.allclose(matrix, expected),
("Got " + str(matrix)))
def test_error_bound_qubit_tight_less_than_loose_integration(self):
terms = [QubitOperator('X1'), QubitOperator('Y1'), QubitOperator('Z1')]
self.assertLess(error_bound(terms, tight=True),
error_bound(terms, tight=False))
class TrotterStepsRequiredTest(unittest.TestCase):
def test_trotter_steps_required(self):
self.assertEqual(trotter_steps_required(
trotter_error_bound=0.3, time=2.5, energy_precision=0.04), 7)
def test_trotter_steps_required_negative_time(self):
self.assertEqual(trotter_steps_required(
trotter_error_bound=0.1, time=3.3, energy_precision=0.11), 4)
def test_return_type(self):
self.assertIsInstance(trotter_steps_required(0.1, 0.1, 0.1), int)
|
apache-2.0
| 837,469,697,044,621,600
| 38.333333
| 79
| 0.642527
| false
| 3.254764
| true
| false
| false
|
tadamic/sokoenginepy
|
src/sokoenginepy/tessellation/hexoban_tessellation/hexoban_tessellation.py
|
1
|
3295
|
from ...utilities import COLUMN, ROW, index_1d, inverted, is_on_board_2d
from ..direction import Direction, UnknownDirectionError
from ..tessellation_base import TessellationBase, TessellationBaseInheritableDocstrings
class HexobanTessellation(
TessellationBase, metaclass=TessellationBaseInheritableDocstrings
):
_LEGAL_DIRECTIONS = (
Direction.LEFT,
Direction.RIGHT,
Direction.NORTH_EAST,
Direction.NORTH_WEST,
Direction.SOUTH_EAST,
Direction.SOUTH_WEST,
)
_CHR_TO_ATOMIC_MOVE = None
_ATOMIC_MOVE_TO_CHR = None
@property
@copy_ancestor_docstring
def legal_directions(self):
return self._LEGAL_DIRECTIONS
@property
@copy_ancestor_docstring
def graph_type(self):
from ...graph import GraphType
return GraphType.DIRECTED
@copy_ancestor_docstring
def neighbor_position(self, position, direction, board_width, board_height):
# if not is_on_board_1d(position, board_width, board_height):
# return None
row = ROW(position, board_width)
column = COLUMN(position, board_width)
if direction == Direction.LEFT:
column -= 1
elif direction == Direction.RIGHT:
column += 1
elif direction == Direction.NORTH_EAST:
column += row % 2
row -= 1
elif direction == Direction.NORTH_WEST:
column -= (row + 1) % 2
row -= 1
elif direction == Direction.SOUTH_EAST:
column += row % 2
row += 1
elif direction == Direction.SOUTH_WEST:
column -= (row + 1) % 2
row += 1
else:
raise UnknownDirectionError(direction)
if is_on_board_2d(column, row, board_width, board_height):
return index_1d(column, row, board_width)
return None
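# Worked example (illustrative, assuming the usual row-major index_1d/ROW/
# COLUMN helpers): on a 5-wide board, position 7 is column 2, row 1 (an odd
# row), so its NORTH_EAST neighbor shifts one column right while moving up a
# row, landing on column 3, row 0, i.e. index 3; LEFT from the same cell is
# simply index 6.
#
#     tessellation = HexobanTessellation()
#     tessellation.neighbor_position(7, Direction.NORTH_EAST, 5, 5)  # -> 3
#     tessellation.neighbor_position(7, Direction.LEFT, 5, 5)        # -> 6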
@property
def _char_to_atomic_move_dict(self):
if not self.__class__._CHR_TO_ATOMIC_MOVE:
from ...snapshot import AtomicMoveCharacters
self.__class__._CHR_TO_ATOMIC_MOVE = {
AtomicMoveCharacters.l: (Direction.LEFT, False),
AtomicMoveCharacters.L: (Direction.LEFT, True),
AtomicMoveCharacters.r: (Direction.RIGHT, False),
AtomicMoveCharacters.R: (Direction.RIGHT, True),
AtomicMoveCharacters.u: (Direction.NORTH_WEST, False),
AtomicMoveCharacters.U: (Direction.NORTH_WEST, True),
AtomicMoveCharacters.d: (Direction.SOUTH_EAST, False),
AtomicMoveCharacters.D: (Direction.SOUTH_EAST, True),
AtomicMoveCharacters.n: (Direction.NORTH_EAST, False),
AtomicMoveCharacters.N: (Direction.NORTH_EAST, True),
AtomicMoveCharacters.s: (Direction.SOUTH_WEST, False),
AtomicMoveCharacters.S: (Direction.SOUTH_WEST, True),
}
return self._CHR_TO_ATOMIC_MOVE
@property
def _atomic_move_to_char_dict(self):
if not self.__class__._ATOMIC_MOVE_TO_CHR:
self.__class__._ATOMIC_MOVE_TO_CHR = inverted(
self._char_to_atomic_move_dict
)
return self._ATOMIC_MOVE_TO_CHR
def __str__(self):
return "hexoban"
|
gpl-3.0
| -4,547,235,167,960,441,300
| 34.053191
| 87
| 0.598786
| false
| 3.702247
| false
| false
| false
|
tolteck/stripe_mock_server
|
localstripe/errors.py
|
1
|
1326
|
# -*- coding: utf-8 -*-
# Copyright 2017 Adrien Vergé
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
from aiohttp import web
def json_response(*args, **kwargs):
return web.json_response(
*args,
dumps=lambda x: json.dumps(x, indent=2, sort_keys=True) + '\n',
**kwargs)
class UserError(Exception):
def __init__(self, code, message=None, contents=None):
Exception.__init__(self)
self.code = code
self.body = {'error': contents or {}}
self.body['error']['type'] = 'invalid_request_error'
if message is not None:
self.body['error']['message'] = message
def to_response(self):
return json_response(self.body, status=self.code)
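# A minimal sketch of how these helpers are typically used from an aiohttp
# handler; the field name and message below are illustrative only.
async def _example_handler(request):
    data = await request.post()
    try:
        if 'amount' not in data:
            raise UserError(400, 'Missing amount', contents={'param': 'amount'})
    except UserError as e:
        return e.to_response()
    return json_response({'received': dict(data)})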
|
gpl-3.0
| 9,209,162,390,930,675,000
| 32.974359
| 71
| 0.678491
| false
| 3.88563
| false
| false
| false
|
lanhel/pyzombie
|
setup/lib/distutils_local/build_docutils.py
|
1
|
4094
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#-------------------------------------------------------------------------------
"""test
Implements a Distutils 'test' command."""
__author__ = ('Lance Finn Helsten',)
__version__ = '1.0.1'
__copyright__ = """Copyright 2009 Lance Finn Helsten (helsten@acm.org)"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ['build_docutils']
import sys
if sys.version_info < (3, 0):
raise Exception("pytrader requires Python 3.0 or higher.")
import os
import errno
import itertools
import shutil
import subprocess
from distutils.core import Command
def doc_paths(packages):
"""Given a list of package names find all the reStructured text files
with a '.rst' extension."""
dirs = [p.replace('.', os.sep) for p in packages]
dirs = [os.path.abspath(p) for p in dirs]
files = [[os.path.join(p, f) for f in os.listdir(p)] for p in dirs]
files = [f for f in itertools.chain(*files) if os.path.splitext(f)[1] == '.rst']
files = [os.path.relpath(f) for f in files]
return files
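# The copy steps in build_docutils.run() below also call a doc_dirs() helper
# that does not appear in this file; a minimal sketch of one possible
# implementation is given here, assuming it mirrors doc_paths() but returns
# the package directories themselves.
def doc_dirs(packages):
    """Given a list of package names, return their relative directory paths."""
    dirs = [p.replace('.', os.sep) for p in packages]
    return [os.path.relpath(os.path.abspath(p)) for p in dirs]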
class build_docutils(Command):
description = "Build documentation with Docutils."
user_options = [
('build-base=', 'b', "base directory for build library"),
('build-lib=', None, "build directory for all distribution"),
('force', 'f', 'Build documentation ignoring timestamps.')
]
def has_docs(self):
return len(doc_paths(self.distribution.packages)) > 0
def initialize_options(self):
self.build_base = 'build'
self.build_lib = None
self.force = False
def finalize_options(self):
if self.build_lib is None:
self.build_lib = os.path.join(self.build_base, 'lib')
def run(self):
args = ["rst2html.py",
"--stylesheet", "help.css",
"--link-stylesheet",
"--traceback",
"SRC_PATH_ARG_2",
"DST_PATH_ARG_3"]
#Process the reStructuredText files.
try:
for f in doc_paths(self.distribution.packages):
src = os.path.abspath(f)
dst = os.path.abspath(
os.path.join(self.build_lib, os.path.splitext(f)[0] + ".html"))
if not os.path.isdir(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
if self.force or not os.path.isfile(dst) or os.path.getmtime(src) > os.path.getmtime(dst):
print("Docutils", f)
args[-2] = os.path.abspath(src)
args[-1] = os.path.abspath(dst)
ret = subprocess.call(args)
except OSError as err:
if err.errno == errno.ENOENT:
print("error: Docutils missing.", file=sys.stderr)
raise err
#Copy CSS files
for p in doc_dirs(self.distribution.packages):
src = '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/docutils/writers/html4css1/html4css1.css'
dst = os.path.join(self.build_lib, p, 'html4css1.css')
print("Copy", dst)
shutil.copyfile(src, dst)
files = [[os.path.join(p, f) for f in os.listdir(p)]
for p in doc_dirs(self.distribution.packages)]
files = [f for f in itertools.chain(*files)]
files = [f for f in files if os.path.splitext(f)[1] not in [".py", ".rst"]]
for f in files:
src = os.path.abspath(f)
dst = os.path.abspath(os.path.join(self.build_lib, f))
shutil.copyfile(src, dst)
|
apache-2.0
| -7,969,874,219,492,791,000
| 37.990476
| 148
| 0.586957
| false
| 3.776753
| false
| false
| false
|
yxdong/ybk
|
ybk/lighttrade/sysframe/client.py
|
1
|
7588
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import random
import logging
from concurrent.futures import ThreadPoolExecutor
import requests
from requests.packages.urllib3.util import is_connection_dropped
import xmltodict
from .protocol import (UserProtocol, TradeProtocol,
MoneyProtocol, OfferProtocol)
requests.packages.urllib3.disable_warnings()
log = logging.getLogger('sysframe')
class Client(UserProtocol, TradeProtocol, MoneyProtocol, OfferProtocol):
def __init__(self,
front_url,
tradeweb_url):
"""
:param front_url: http://HOST:PORT
:param tradeweb_url: [http://HOST:PORT/issue_tradeweb/httpXmlServlet]
"""
self.front_url = front_url or ''
self.tradeweb_urls = tradeweb_url
self.tradeweb_url = random.choice(tradeweb_url)
for url in tradeweb_url:
if url.startswith(self.front_url):
self.front_url = self.tradeweb_url.rsplit('/', 2)[0]
break
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=10,
pool_maxsize=10)
self.session.mount('http://', adapter)
self.session.headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
self.executor = ThreadPoolExecutor(2)
self.executor.submit(self.warmup, 1)
self._reset()
def _reset(self):
self.cid = None # customer_id
self.uid = None # user_id
self.sid = None # session_id
self.mid = '99' # market_id
self.jsid = None # cookie
self.username = None
self.password = None
self.latency = None
self.time_offset = None
self.last_error = ''
def error(self, msg):
self.last_error = msg
log.error(msg)
@property
def is_logged_in(self):
return self.sid is not None
def request_tradeweb(self, protocol, params):
return self.request_xml(protocol, params, mode='tradeweb')
def request_front(self, protocol, params):
return self.request_xml(protocol, params, mode='front')
def request_xml(self, protocol, params, mode='tradeweb', headers={},
to=1):
""" 发送交易指令
- 拼接请求成xml
- 发送
- 解析返回的请求
"""
if mode == 'tradeweb':
url = self.tradeweb_url
elif mode == 'front':
url = self.front_url + \
'/common_front/checkneedless/user/logon/logon.action'
xml = self._create_xml(protocol, params)
log.debug('Sending request {}: {}'.format(url, xml))
try:
r = self.session.post(
url, headers=headers, data=xml, verify=False,
timeout=(to, to))
except requests.exceptions.RequestException:
self.tradeweb_url = random.choice(self.tradeweb_urls)
if to <= 32:
to *= 2
else:
raise ValueError('Connection timed out')
return self.request_xml(protocol, params, mode, headers, to=to)
result = r.content.decode('gb18030', 'ignore')
log.debug('Received response {}'.format(result))
if len(result) > 0:
return xmltodict.parse(result)
else:
raise ValueError('Request failed, check the request format and network connection')
def warmup(self, size=5):
""" Warmup Connection Pools """
t0 = time.time()
url = self.tradeweb_url
a = self.session.get_adapter(url)
p = a.get_connection(url)
count = 0
conns = [p._get_conn() for _ in range(size)]
for c in conns:
if is_connection_dropped(c):
count += 1
c.connect()
p._put_conn(c)
p.pool.queue = list(reversed(p.pool.queue))
if count > 0:
log.info('Re-established {} connections, took {} seconds'
''.format(count, time.time() - t0))
def clear_connections(self):
url = self.tradeweb_url
a = self.session.get_adapter(url)
p = a.get_connection(url)
p.pool.queue = []
def request_ff(self, requests, interval=0.001, repeat=1, response=False):
""" Fire and Forget Requests in Batch
:param requests: [(protocol, params), ...]
"""
if len(requests) * repeat > 90:
repeat = 90 // len(requests)
log.warning('Too many batched requests; automatically reducing the repeat count to {}'.format(repeat))
if repeat < 1:
raise ValueError('Too many requests in a single batch; keep it below 90')
xmls = [self._create_xml(protocol, params)
for protocol, params in requests]
bxmls = [xml.encode('utf-8') for xml in xmls]
url = self.tradeweb_url
a = self.session.get_adapter(url)
p = a.get_connection(url)
c = p._get_conn()
if is_connection_dropped(c):
c.connect()
hu = url[url.find('//') + 2:]
host, uri = hu.split('/', 1)
def build_request(bxml):
data = 'POST /{} HTTP/1.1\r\n'.format(uri) + \
'HOST: {}\r\n'.format(host) + \
'COOKIE: JSESSIONID={}\r\n'.format(self.jsid) + \
'Connection: Keep-Alive\r\n' + \
'Content-Length: {}\r\n'.format(len(bxml)) + \
'\r\n'
data = data.encode('gb18030') + bxml
return data
begin = time.time()
sleep_overhead = 0.0002
for _ in range(repeat):
for bxml in bxmls:
t0 = time.time()
data = build_request(bxml)
c.sock.sendall(data)
used = time.time() - t0
if used < interval - sleep_overhead:
time.sleep(interval - used - sleep_overhead)
end = time.time()
log.info('Batch send complete: took {} seconds to send {} requests'
''.format(end - begin, len(bxmls) * repeat))
# Parsing Results
if response:
results = []
count = len(xmls) * repeat
f = c.sock.makefile('rb')
while count > 0:
count -= 1
length = 0
line = f.readline().strip()
if not line.startswith(b'HTTP/1.1'):
break
while True:
line = f.readline().strip()
if not line:
break
key, value = line.split(b': ')
if key == b'Content-Length':
length = int(value)
content = f.read(length)
text = content.decode('gb18030', 'ignore')
results.append(xmltodict.parse(text))
p._put_conn(c)
return results
else:
# we are closing one connection, for performance consideration
# let's open another connection (if necessory) in background
self.executor.submit(self.warmup, 3)
c.close()
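# Example (illustrative, with a hypothetical protocol name): fire 20 copies
# of an order request, roughly 1 ms apart, without waiting for responses:
#
#     client.request_ff([('order_protocol', {'ORDER_ID': 1})],
#                       interval=0.001, repeat=20)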
def _create_xml(self, protocol, params):
header = '<?xml version="1.0" encoding="gb2312"?>'
reqs = []
for key, value in params.items():
reqs.append('<{}>{}</{}>'.format(key, value, key))
req = ''.join(reqs)
body = '<GNNT><REQ name="{}">{}</REQ></GNNT>'.format(protocol, req)
return header + body
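# For illustration, a request built for a hypothetical protocol name and
# parameter set, e.g. _create_xml('user_login', {'USER_ID': 42}), comes out
# as a single line:
#
#     <?xml version="1.0" encoding="gb2312"?><GNNT><REQ name="user_login"><USER_ID>42</USER_ID></REQ></GNNT>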
|
mit
| 2,448,941,336,220,080,600
| 32.707763
| 77
| 0.518288
| false
| 3.689155
| false
| false
| false
|
tensorflow/tpu
|
models/experimental/show_and_tell/show_and_tell_model.py
|
1
|
13116
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image-to-text implementation based on http://arxiv.org/abs/1411.4555.
"Show and Tell: A Neural Image Caption Generator"
Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard Imports
import tensorflow.compat.v1 as tf
import image_embedding
import image_processing
import inputs as input_ops
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import rnn as contrib_rnn
class ShowAndTellModel(object):
"""Image-to-text implementation based on http://arxiv.org/abs/1411.4555.
"Show and Tell: A Neural Image Caption Generator"
Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan
"""
def __init__(self, config, mode, train_inception=False):
"""Basic setup.
Args:
config: Object containing configuration parameters.
mode: "train", "eval" or "inference".
train_inception: Whether the inception submodel variables are trainable.
"""
assert mode in ["train", "eval", "inference"]
self.config = config
self.mode = mode
self.train_inception = train_inception
# To match the "Show and Tell" paper we initialize all variables with a
# random uniform initializer.
self.initializer = tf.random_uniform_initializer(
minval=-self.config.initializer_scale,
maxval=self.config.initializer_scale)
# A float32 Tensor with shape [batch_size, height, width, channels].
self.images = None
# An int32 Tensor with shape [batch_size, padded_length].
self.input_seqs = None
# An int32 Tensor with shape [batch_size, padded_length].
self.target_seqs = None
# An int32 0/1 Tensor with shape [batch_size, padded_length].
self.input_mask = None
# A float32 Tensor with shape [batch_size, embedding_size].
self.image_embeddings = None
# A float32 Tensor with shape [batch_size, padded_length, embedding_size].
self.seq_embeddings = None
# A float32 scalar Tensor; the total loss for the trainer to optimize.
self.total_loss = None
# A float32 Tensor with shape [batch_size * padded_length].
self.target_cross_entropy_losses = None
# A float32 Tensor with shape [batch_size * padded_length].
self.target_cross_entropy_loss_weights = None
# Collection of variables from the inception submodel.
self.inception_variables = []
# Function to restore the inception submodel from checkpoint.
self.init_fn = None
# Global step Tensor.
self.global_step = None
def is_training(self):
"""Returns true if the model is built for training mode."""
return self.mode == "train"
def load_image(self, encoded_image, thread_id=0):
"""Decodes and processes an image string.
Args:
encoded_image: A scalar string Tensor; the encoded image.
thread_id: Preprocessing thread id used to select the ordering of color
distortions.
Returns:
A float32 Tensor of shape [height, width, 3]; the processed image.
"""
return image_processing.process_image(
encoded_image,
is_training=self.is_training(),
height=self.config.image_height,
width=self.config.image_width,
thread_id=thread_id,
image_format=self.config.image_format)
def distort_images(self, images, seed):
"""Distort a batch of images.
(Processing a batch allows us to easily switch between TPU and CPU
execution).
"""
if self.mode == "train":
images = image_processing.distort_image(images, seed)
# Rescale to [-1,1] instead of [0, 1]
images = tf.subtract(images, 0.5)
images = tf.multiply(images, 2.0)
return images
def build_inputs(self):
"""Input prefetching, preprocessing and batching.
Outputs:
self.images
self.input_seqs
self.target_seqs (training and eval only)
self.input_mask (training and eval only)
"""
if self.mode == "inference":
# In inference mode, images and inputs are fed via placeholders.
image_feed = tf.placeholder(dtype=tf.string, shape=[], name="image_feed")
input_feed = tf.placeholder(
dtype=tf.int64,
shape=[None], # batch_size
name="input_feed")
# Process image and insert batch dimensions.
images = tf.expand_dims(self.load_image(image_feed), 0)
input_seqs = tf.expand_dims(input_feed, 1)
# No target sequences or input mask in inference mode.
target_seqs = None
input_mask = None
else:
def _load_example(serialized_example):
encoded_image, caption = input_ops.parse_example(
serialized_example,
image_feature=self.config.image_feature_name,
caption_feature=self.config.caption_feature_name)
image = self.load_image(encoded_image)
# strings.split expects a batch
input_seqs, target_seqs, input_mask = input_ops.pad_caption_to_input(
caption)
return image, input_seqs, target_seqs, input_mask
def _load_dataset(filename):
return tf.data.TFRecordDataset(filename, buffer_size=16 * 1024 * 1024)
df = tf.data.Dataset.list_files(
self.config.input_file_pattern, shuffle=self.mode == "train")
df = df.apply(
tf.data.experimental.parallel_interleave(
_load_dataset, cycle_length=64, sloppy=True))
if self.mode == "train":
df = df.repeat()
df = df.shuffle(1024)
df = df.apply(
tf.data.experimental.map_and_batch(
_load_example,
self.config.batch_size,
num_parallel_batches=8,
drop_remainder=True))
df = df.prefetch(8)
images, input_seqs, target_seqs, input_mask = df.make_one_shot_iterator(
).get_next()
self.images = images
self.input_seqs = input_seqs
self.target_seqs = target_seqs
self.input_mask = input_mask
def build_image_embeddings(self, images):
"""Builds the image model subgraph and generates image embeddings."""
images = self.distort_images(images, tf.train.get_or_create_global_step())
inception_output = image_embedding.inception_v3(
images,
trainable=self.train_inception,
is_training=self.is_training(),
add_summaries=False)
self.inception_variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope="InceptionV3")
# Map inception output into embedding space.
with tf.variable_scope("image_embedding") as scope:
image_embeddings = contrib_layers.fully_connected(
inputs=inception_output,
num_outputs=self.config.embedding_size,
activation_fn=None,
weights_initializer=self.initializer,
biases_initializer=None,
scope=scope)
# Save the embedding size in the graph.
tf.constant(self.config.embedding_size, name="embedding_size")
return image_embeddings
def build_seq_embeddings(self, input_seqs):
"""Builds the input sequence embeddings.
Inputs:
input_seqs
Outputs:
self.seq_embeddings
"""
with tf.variable_scope("seq_embedding"), tf.device("/cpu:0"):
embedding_map = tf.get_variable(
name="map",
shape=[self.config.vocab_size, self.config.embedding_size],
initializer=self.initializer)
seq_embeddings = tf.nn.embedding_lookup(embedding_map, input_seqs)
return seq_embeddings
def build_model(self):
"""Builds the model.
Inputs:
self.image_embeddings
self.seq_embeddings
self.target_seqs (training and eval only)
self.input_mask (training and eval only)
Outputs:
self.total_loss (training and eval only)
self.target_cross_entropy_losses (training and eval only)
self.target_cross_entropy_loss_weights (training and eval only)
"""
# This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the
# modified LSTM in the "Show and Tell" paper has no biases and outputs
# new_c * sigmoid(o).
lstm_cell = contrib_rnn.BasicLSTMCell(
num_units=self.config.num_lstm_units, state_is_tuple=True)
if self.mode == "train":
lstm_cell = contrib_rnn.DropoutWrapper(
lstm_cell,
input_keep_prob=self.config.lstm_dropout_keep_prob,
output_keep_prob=self.config.lstm_dropout_keep_prob)
with tf.variable_scope("lstm", initializer=self.initializer) as lstm_scope:
# Feed the image embeddings to set the initial LSTM state.
zero_state = lstm_cell.zero_state(
batch_size=self.image_embeddings.get_shape()[0], dtype=tf.float32)
_, initial_state = lstm_cell(self.image_embeddings, zero_state)
# Allow the LSTM variables to be reused.
lstm_scope.reuse_variables()
if self.mode == "inference":
# In inference mode, use concatenated states for convenient feeding and
# fetching.
tf.concat(initial_state, 1, name="initial_state")
# Placeholder for feeding a batch of concatenated states.
state_feed = tf.placeholder(
dtype=tf.float32,
shape=[None, sum(lstm_cell.state_size)],
name="state_feed")
state_tuple = tf.split(value=state_feed, num_or_size_splits=2, axis=1)
# Run a single LSTM step.
lstm_outputs, state_tuple = lstm_cell(
inputs=tf.squeeze(self.seq_embeddings, squeeze_dims=[1]),
state=state_tuple)
# Concatenate the resulting state.
tf.concat(state_tuple, 1, name="state")
else:
# Run the batch of sequence embeddings through the LSTM.
sequence_length = tf.reduce_sum(self.input_mask, 1)
lstm_outputs, _ = tf.nn.dynamic_rnn(
cell=lstm_cell,
inputs=self.seq_embeddings,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=tf.float32,
scope=lstm_scope)
# Stack batches vertically.
lstm_outputs = tf.reshape(lstm_outputs, [-1, lstm_cell.output_size])
with tf.variable_scope("logits") as logits_scope:
logits = contrib_layers.fully_connected(
inputs=lstm_outputs,
num_outputs=self.config.vocab_size,
activation_fn=None,
weights_initializer=self.initializer,
scope=logits_scope)
if self.mode == "inference":
tf.nn.softmax(logits, name="softmax")
else:
targets = tf.reshape(self.target_seqs, [-1])
weights = tf.to_float(tf.reshape(self.input_mask, [-1]))
# Compute losses.
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=logits)
batch_loss = tf.div(
tf.reduce_sum(tf.multiply(losses, weights)),
tf.reduce_sum(weights),
name="batch_loss")
tf.losses.add_loss(batch_loss)
total_loss = tf.losses.get_total_loss()
self.total_loss = total_loss
self.target_cross_entropy_losses = losses # Used in evaluation.
self.target_cross_entropy_loss_weights = weights # Used in evaluation.
def setup_inception_initializer(self):
"""Sets up the function to restore inception variables from checkpoint."""
if self.mode != "inference":
# Restore inception variables only.
saver = tf.train.Saver(self.inception_variables)
def restore_fn(sess):
tf.logging.info("Restoring Inception variables from checkpoint file %s",
self.config.inception_checkpoint_file)
saver.restore(sess, self.config.inception_checkpoint_file)
self.init_fn = restore_fn
def setup_global_step(self):
"""Sets up the global step Tensor."""
self.global_step = tf.train.get_or_create_global_step()
def build_model_for_tpu(self, images, input_seqs, target_seqs, input_mask):
self.image_embeddings = self.build_image_embeddings(images)
self.seq_embeddings = self.build_seq_embeddings(target_seqs)
self.target_seqs = target_seqs
self.input_mask = input_mask
self.build_model()
def build(self):
"""Creates all ops for training and evaluation."""
self.build_inputs()
self.image_embeddings = self.build_image_embeddings(self.images)
self.seq_embeddings = self.build_seq_embeddings(self.input_seqs)
self.build_model()
self.setup_inception_initializer()
self.setup_global_step()
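# A rough usage sketch (illustrative only; ShowAndTellConfig is a stand-in
# for whatever configuration object supplies the attributes read above, such
# as batch_size, vocab_size and input_file_pattern):
#
#     config = ShowAndTellConfig(...)
#     model = ShowAndTellModel(config, mode="train", train_inception=False)
#     model.build()
#     train_op = tf.train.GradientDescentOptimizer(0.01).minimize(
#         model.total_loss, global_step=model.global_step)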
|
apache-2.0
| 4,280,079,833,549,058,000
| 34.448649
| 80
| 0.658432
| false
| 3.816119
| true
| false
| false
|
mganeva/mantid
|
scripts/HFIR_4Circle_Reduction/optimizelatticewindow.py
|
1
|
3900
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=C0103
from __future__ import (absolute_import, division, print_function)
from qtpy.QtWidgets import (QMainWindow)
from qtpy.QtCore import Signal as pyqtSignal
from mantid.kernel import Logger
try:
from mantidqt.utils.qt import load_ui
except ImportError:
Logger("HFIR_4Circle_Reduction").information('Using legacy ui importer')
from mantidplot import load_ui
class OptimizeLatticeWindow(QMainWindow):
"""
Main window widget to set up parameters to optimize
"""
# establish signal for communicating from App2 to App1 - must be defined before the constructor
mySignal = pyqtSignal(int)
def __init__(self, parent=None):
"""
Initialization
:param parent:
:return:
"""
# init
QMainWindow.__init__(self, parent)
ui_path = "OptimizeLattice.ui"
self.ui = load_ui(__file__, ui_path, baseinstance=self)
# initialize widgets
self.ui.comboBox_unitCellTypes.addItems(['Cubic',
'Tetragonal',
'Orthorhombic',
'Hexagonal',
'Rhombohedral',
'Monoclinic',
'Triclinic'])
self.ui.comboBox_ubSource.addItems(['Tab - Calculate UB Matrix', 'Tab - Accepted UB Matrix'])
self.ui.lineEdit_tolerance.setText('0.12')
# define event handling
self.ui.pushButton_Ok.clicked.connect(self.do_ok)
self.ui.pushButton_cancel.clicked.connect(self.do_quit)
if parent is not None:
# connect to the method to refine UB matrix by constraining lattice parameters
self.mySignal.connect(parent.refine_ub_lattice)
# flag to trace back its previous step
self._prevIndexByFFT = False
return
def do_ok(self):
"""
The user decided to proceed, so send a signal to the parent window
:return:
"""
tolerance = self.get_tolerance()
if tolerance is None:
raise RuntimeError('Tolerance cannot be left blank!')
# set up a hand-shaking signal
signal_value = 1000
self.mySignal.emit(signal_value)
# quit
self.do_quit()
return
def do_quit(self):
"""
Quit the window
:return:
"""
self.close()
return
def get_unit_cell_type(self):
"""
Get the tolerance
:return:
"""
unit_cell_type = str(self.ui.comboBox_unitCellTypes.currentText())
return unit_cell_type
def get_tolerance(self):
"""
Get the tolerance for refining UB matrix with unit cell type.
:return:
"""
tol_str = str(self.ui.lineEdit_tolerance.text()).strip()
if len(tol_str) == 0:
# blank: return None
tol = None
else:
tol = float(tol_str)
return tol
def get_ub_source(self):
"""
Get the index of the tab where the UB matrix comes from
:return:
"""
source = str(self.ui.comboBox_ubSource.currentText())
if source == 'Tab - Calculate UB Matrix':
tab_index = 3
else:
tab_index = 4
return tab_index
def set_prev_ub_refine_method(self, use_fft=False):
"""
:param use_fft:
:return:
"""
self._prevIndexByFFT = use_fft
return
|
gpl-3.0
| -8,642,162,336,785,655,000
| 27.057554
| 101
| 0.554103
| false
| 4.193548
| false
| false
| false
|
alexandregz/simian
|
src/simian/util/compile_js.py
|
1
|
1961
|
#!/usr/bin/env python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Uses Closure Compiler Service API to compile JavaScript file.
# ./compile_js.py <path_to_input_js_file> ... <path_to_output_js_file>
import httplib
import urllib
import re
import sys
CLOSURE_SERVICE_DOMAIN = 'closure-compiler.appspot.com'
BASE_URL = 'https://raw.githubusercontent.com/google/simian/master/src/simian/mac/admin/js/'
JS_FILES = ['main.js', 'forms.js', 'menu.js', 'net.js', 'tags.js']
CODE_URLS = [BASE_URL + f for f in JS_FILES]
output_js_file = sys.argv[1]
# Param docs: https://developers.google.com/closure/compiler/docs/api-ref
params = [
('compilation_level', 'ADVANCED_OPTIMIZATIONS'),
('output_format', 'text'),
('output_info', 'compiled_code'),
('use_closure_library', True),
]
for url in CODE_URLS:
params.append(('code_url', url))
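# Note: params is a list of (key, value) tuples rather than a dict so that the
# 'code_url' key can repeat; the encoded body looks roughly like (illustrative):
#   compilation_level=ADVANCED_OPTIMIZATIONS&...&code_url=<url1>&code_url=<url2>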
params = urllib.urlencode(params)
# Always use the following value for the Content-type header.
headers = { "Content-type": "application/x-www-form-urlencoded" }
conn = httplib.HTTPConnection(CLOSURE_SERVICE_DOMAIN)
conn.request('POST', '/compile', params, headers)
response = conn.getresponse()
response_text = response.read()
conn.close()
if response.status != 200 or response_text.startswith('Error'):
print >>sys.stderr, 'JS compilation failed: %s' % response_text
sys.exit(1)
f = open(output_js_file, 'w')
f.write(response_text)
f.close()
|
apache-2.0
| 2,229,931,755,863,811,600
| 31.147541
| 92
| 0.721061
| false
| 3.27379
| false
| false
| false
|
gonicus/gosa
|
doc/sphinx-cindex/setup.py
|
1
|
1188
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
long_desc = '''
This package contains the cindex Sphinx extension.
Allows declaring cindex specs wherever in the documentation (for instance,
in docstrings of UnitTest.test_* methods) and displaying them as a single
list.
'''
requires = ['Sphinx>=0.6']
setup(
name='sphinxcontrib-cindex',
version='0.1',
license='GPL',
author='Fabian Hickert',
author_email='hickert@gonicus.de',
description='Sphinx "cindex" extension',
long_description=long_desc,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Utilities',
],
platforms='any',
packages=find_packages(),
include_package_data=True,
install_requires=requires,
namespace_packages=['sphinxcontrib'],
package_data={'sphinxcontrib': ['cindex.css']},
)
|
lgpl-2.1
| -4,032,579,850,245,826,600
| 27.285714
| 74
| 0.648148
| false
| 4
| false
| false
| false
|
npawelek/rpc-maas
|
playbooks/files/rax-maas/plugins/neutron_api_local_check.py
|
1
|
3562
|
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import ipaddr
from maas_common import generate_local_endpoint
from maas_common import get_openstack_client
from maas_common import metric
from maas_common import metric_bool
from maas_common import print_output
from maas_common import status_err
from maas_common import status_ok
from requests import exceptions as exc
def check(args):
neutron = get_openstack_client('network')
try:
neutron_local_endpoint = generate_local_endpoint(
str(neutron.get_endpoint()), args.ip, args.port,
args.protocol, '/agents'
)
resp = neutron.session.get(neutron_local_endpoint, timeout=180)
except (exc.ConnectionError, exc.HTTPError, exc.Timeout):
is_up = False
metric_bool('client_success', False, m_name='maas_neutron')
# Any other exception presumably isn't an API error
except Exception as e:
metric_bool('client_success', False, m_name='maas_neutron')
status_err(str(e), m_name='maas_neutron')
else:
is_up = True
milliseconds = resp.elapsed.total_seconds() * 1000
metric_bool('client_success', True, m_name='maas_neutron')
# Gather a few metrics
agents = len(resp.json()['agents'])
networks = len([i for i in neutron.networks()])
routers = len([i for i in neutron.routers()])
subnets = len([i for i in neutron.subnets()])
status_ok(m_name='maas_neutron')
metric_bool('neutron_api_local_status', is_up, m_name='maas_neutron')
# Only send metrics if the API is up
if is_up:
metric('neutron_api_local_response_time',
'double',
'%.3f' % milliseconds,
'ms')
metric('neutron_agents', 'uint32', agents, 'agents')
metric('neutron_networks', 'uint32', networks, 'networks')
        metric('neutron_routers', 'uint32', routers, 'routers')
metric('neutron_subnets', 'uint32', subnets, 'subnets')
def main(args):
check(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Check Neutron API against local or remote address')
parser.add_argument('ip', nargs='?',
type=ipaddr.IPv4Address,
help='Optional Neutron API server address')
parser.add_argument('--telegraf-output',
action='store_true',
default=False,
help='Set the output format to telegraf')
parser.add_argument('--port',
action='store',
default='9696',
help='Port for neutron API service')
parser.add_argument('--protocol',
action='store',
default='http',
help='Protocol for the neutron API service')
args = parser.parse_args()
with print_output(print_telegraf=args.telegraf_output):
main(args)
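# Example invocation (address and options below are illustrative only):
#   neutron_api_local_check.py 192.0.2.10 --port 9696 --protocol http --telegraf-output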
|
apache-2.0
| -165,083,766,257,173,760
| 36.104167
| 74
| 0.623526
| false
| 4.13705
| false
| false
| false
|
billzorn/msp-pymodel
|
lib/msp_assem.py
|
1
|
7687
|
import msp_fr5969_model as model
from msp_isa import isa
# low level wrappers for isa methods
def _as(fmt, name, smode, dmode, fields):
ins = isa.modes_to_instr(fmt, name, smode, dmode)
#print('{:s} {:s} {:s} {:s}'.format(name, smode, dmode, repr(fields)))
words = isa.inhabitant(ins, fields)
return words
def assemble(name, smode, dmode, fields):
fmt = isa.name_to_fmt[name]
return _as(fmt, name, smode, dmode, fields)
# We record used registers as sets: this could be very compactly represented
# with machine integer backed bit sets, but whatever.
# We distinguish between two different ways to "use" a register: a "use" depends
# on the data in it, so other instructions are not free to overwrite it. A
# "clobber" puts unknown data into the register (due to expected differences
# between the hardware and the simulator) and needs to be cleaned up at some
# point.
class Reginfo(object):
    def __init__(self, uses=None, clobbers=None):
        # avoid shared mutable default arguments
        self.uses = {} if uses is None else uses
        self.clobbers = set(clobbers) if clobbers is not None else set()
def conflict(self, reg):
if reg in self.uses:
return self.uses[reg]
elif reg in self.clobbers:
return True
return False
def add(self, uses = {}, clobbers = []):
for use in uses:
if use in self.uses:
raise ValueError('conflict: already using {:s}'.format(repr(use)))
self.uses[use] = uses[use]
for clobber in clobbers:
self.clobbers.add(clobber)
    # Returns the existing value if the register is already set and the predicate
    # passes; raises if the predicate fails (for either the existing value or the
    # default). Returns False after recording the default if not already set.
def check_or_set_use(self, rn, pred, default):
if rn in self.uses:
if not pred(self.uses[rn]):
raise ValueError('conflict: predicate {:s} failed for {:x}: {:s}'
.format(repr(pred), rn, repr(self.uses[rn])))
return self.uses[rn]
else:
if not pred(default):
raise ValueError('conflict: predicate {:s} failed for {:x}: {:s}'
.format(repr(pred), rn, repr(default)))
self.uses[rn] = default
return False
def overwrite_or_set_use(self, rn, x):
if rn in self.uses:
self.uses[rn] = x
# we did overwrite
return True
else:
self.uses[rn] = x
# set the value anyway
return False
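# Illustrative sketch of how Reginfo is used (register numbers are hypothetical):
#   info = Reginfo(uses={15: 0x1234}, clobbers=[4])
#   info.conflict(15)   # -> 0x1234, R15 holds data another instruction depends on
#   info.conflict(4)    # -> True, R4 holds unknown (clobbered) data
#   info.conflict(7)    # -> False, R7 is free
#   info.add(clobbers=[7])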
# helpful predicates:
def has_immediate(mode):
if mode in {'X(Rn)', 'ADDR', '&ADDR', '#@N', '#N'}:
return True
elif mode in {'Rn', '#1', '@Rn', '@Rn+', 'none'}:
return False
else:
raise ValueError('not an addressing mode: {:s}'.format(mode))
def has_reg(mode):
if mode in {'Rn', 'X(Rn)', '@Rn', '@Rn+'}:
return True
elif mode in {'ADDR', '&ADDR', '#1', '#@N', '#N', 'none'}:
return False
else:
raise ValueError('not an addressing mode: {:s}'.format(mode))
# Will return None if the mode is not a cg mode. Otherwise will return
# the constant being generated, which might be 0 (which is False).
def has_cg(mode, rn):
if mode == 'Rn':
if rn == 3:
return 0 # the same as reading the register
elif mode == 'X(Rn)':
if rn == 2:
return 0 # alternative encoding of &ADDR mode
elif rn == 3:
return 1 # alternative encoding of #1 mode
elif mode == '@Rn':
if rn == 2:
return 4
elif rn == 3:
return 2
elif mode == '@Rn+':
if rn == 2:
return 8
elif rn == 3:
return -1
return None
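# For example: has_cg('@Rn+', 3) == -1, has_cg('Rn', 3) == 0 (falsy, hence the
# note above), and has_cg('Rn', 5) is None because R5 is not a constant generator.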
def uses_addr(mode, rn):
if mode in {'X(Rn)', 'ADDR', '&ADDR', '@Rn', '@Rn+'}:
return not has_cg(mode, rn)
elif mode in {'Rn', '#1', '#@N', '#N', 'none'}:
return False
else:
raise ValueError('not an addressing mode: {:s}'.format(mode))
def uses_reg(mode, rn):
if mode in {'Rn', 'X(Rn)', '@Rn', '@Rn+'}:
return has_cg(mode, rn) is not None
elif mode in {'ADDR', '&ADDR', '#1', '#@N', '#N', 'none'}:
return False
else:
raise ValueError('not an addressing mode: {:s}'.format(mode))
def modifies_destination(name):
if name in {'MOV', 'ADD', 'ADDC', 'SUBC', 'SUB', 'DADD', 'BIC', 'BIS', 'XOR', 'AND',
'RRC', 'SWPB', 'RRA', 'SXT'}:
return True
else:
return False
def modifies_sr(name):
if name in {'ADD', 'ADDC', 'SUBC', 'SUB', 'CMP', 'DADD', 'BIT', 'XOR', 'AND',
'RRC', 'RRA', 'RETI', 'SXT'}:
return True
else:
return False
# assembly with dynamic computation of symbols
def assemble_sym(name, smode, dmode, symfields, pc, labels):
fields = {}
for fieldname in symfields:
sym_v = symfields[fieldname]
if isinstance(sym_v, tuple):
if sym_v[0] == 'PC_ABS':
addr = sym_v[1]
offs = pc
if fieldname in {'isrc'}:
offs += 2
elif fieldname in {'idst'}:
offs += 2
if has_immediate(smode):
offs += 2
v = (addr - offs) & 0xffff #TODO hard-coded 16-bit immediate
elif sym_v[0] == 'LABEL':
# initial implementation: immediate lookup
v = labels[sym_v[1]]
# This requires all of the addresses to be precomputed if we want to
# be able to jump to labels after this instruction.
elif sym_v[0] == 'JLABEL':
# offset to jump label
addr = labels[sym_v[1]]
offs = pc + 2
immediate = (addr - offs) & 0x7ff #TODO hard-coded 11-bit immediate
v = immediate >> 1 & 0x3ff #TODO hard-coded 9-bit immediate
elif sym_v[0] == 'JSIGN':
# sign for offset to jump label
addr = labels[sym_v[1]]
offs = pc + 2
immediate = (addr - offs) & 0x7ff #TODO hard-coded 11-bit immediate
v = immediate >> 10 & 0x1
else:
raise ValueError('unsupported assembly directive: {:s}'.format(sym_v[0]))
else:
v = sym_v
fields[fieldname] = v
return assemble(name, smode, dmode, fields)
def assemble_symregion(instructions, base_pc, labels=None):
    # precompute addresses of labels (use a fresh dict to avoid a shared mutable default)
    if labels is None:
        labels = {}
    pc_pre = base_pc
for args in instructions:
if isinstance(args, str):
labels[args] = pc_pre
else:
name, smode, dmode, fields = args
pc_pre += 2
if has_immediate(smode):
pc_pre += 2
if has_immediate(dmode):
pc_pre += 2
# go back and generate encoding
words = []
pc = base_pc
for args in instructions:
if isinstance(args, str):
assert labels[args] == pc
else:
new_words = assemble_sym(*(args + (pc, labels)))
pc += len(new_words) * 2
words += new_words
# for label in labels:
# print('{:s} : {:s}'.format(label, hex(labels[label])))
assert pc == pc_pre
return words
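# Minimal usage sketch of the label machinery (the instruction field names and
# addresses below are hypothetical, for illustration only):
#   words = assemble_symregion([
#       ('MOV', '#N', 'Rn', {'isrc': 0x1234, 'rdst': 5}),
#       'loop',
#       ('JMP', 'none', 'none', {'offset': ('JLABEL', 'loop'),
#                                's':      ('JSIGN',  'loop')}),
#   ], base_pc=0x4400)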
def region_size(instructions):
size = 0
for args in instructions:
if isinstance(args, str):
# label, skip
continue
else:
name, smode, dmode, fields = args
size += 2
if has_immediate(smode):
size += 2
if has_immediate(dmode):
size += 2
return size
|
mit
| 1,911,821,831,308,556,000
| 33.164444
| 89
| 0.525433
| false
| 3.704578
| false
| false
| false
|
myfavouritekk/TPN
|
tools/propagate/sequence_roi_propagation.py
|
1
|
6286
|
#!/usr/bin/env python
# --------------------------------------------------------
# Test regression propagation on ImageNet VID video
# Modified by Kai KANG (myfavouritekk@gmail.com)
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""
import argparse
import pprint
import time
import os
import os.path as osp
import sys
import cPickle
import numpy as np
this_dir = osp.dirname(__file__)
# add caffe-mpi path
sys.path.insert(0, osp.join(this_dir, '../../external/caffe-mpi/build/install/python'))
import caffe
# add py-faster-rcnn paths
sys.path.insert(0, osp.join(this_dir, '../../external/py-faster-rcnn/lib'))
from fast_rcnn.craft import sequence_im_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
# add external libs
sys.path.insert(0, osp.join(this_dir, '../../external'))
from vdetlib.utils.protocol import proto_load, proto_dump
# add src libs
sys.path.insert(0, osp.join(this_dir, '../../src'))
from tpn.propagate import sequence_roi_propagation
from tpn.target import add_track_targets
from tpn.data_io import save_track_proto_to_zip
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('vid_file')
parser.add_argument('box_file')
parser.add_argument('save_file', help='Save zip file')
parser.add_argument('--annot_file', default=None,
help='Ground truth annotation file. [None]')
parser.add_argument('--job', dest='job_id', help='Job slot, GPU ID + 1. [1]',
default=1, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--param', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=100, type=int)
parser.add_argument('--num_per_batch', dest='boxes_num_per_batch',
help='split boxes to batches. [32]',
default=32, type=int)
parser.add_argument('--bbox_mean', dest='bbox_mean',
help='the mean of bbox',
default=None, type=str)
parser.add_argument('--bbox_std', dest='bbox_std',
help='the std of bbox',
default=None, type=str)
parser.add_argument('--bbox_pred_layer', dest='bbox_pred_layer',
help='Layer name for bbox regression layer in feature net.',
default='bbox_pred_vid', type=str)
parser.add_argument('--length', type=int, default=9,
help='Propagation length. [9]')
parser.add_argument('--sample_rate', type=int, default=1,
help='Temporal subsampling rate. [1]')
parser.add_argument('--offset', type=int, default=0,
help='Offset of sampling. [0]')
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--gpus', nargs='+', default=None, type=int, help='Available GPUs.')
parser.add_argument('--zip', action='store_true',
help='Save as zip files rather than track protocols')
parser.add_argument('--keep_feat', action='store_true',
help='Keep feature.')
parser.set_defaults(vis=False, zip=False, keep_feat=False)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print 'Called with args:'
print args
if osp.isfile(args.save_file):
print "{} already exists.".format(args.save_file)
sys.exit(1)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.job_id - 1
print 'Using config:'
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print 'Waiting for {} to exist...'.format(args.caffemodel)
time.sleep(10)
caffe.set_mode_gpu()
if args.gpus is None:
caffe.set_device(args.job_id - 1)
else:
assert args.job_id <= len(args.gpus)
caffe.set_device(args.gpus[args.job_id-1])
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
# apply bbox regression normalization on the net weights
with open(args.bbox_mean, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_std, 'rb') as f:
bbox_stds = cPickle.load(f)
net.params[args.bbox_pred_layer][0].data[...] = \
net.params[args.bbox_pred_layer][0].data * bbox_stds[:, np.newaxis]
net.params[args.bbox_pred_layer][1].data[...] = \
net.params[args.bbox_pred_layer][1].data * bbox_stds + bbox_means
vid_proto = proto_load(args.vid_file)
box_proto = proto_load(args.box_file)
window = net.params[args.bbox_pred_layer][0].data.shape[0] / 4 + 1
track_proto = sequence_roi_propagation(vid_proto, box_proto, net, sequence_im_detect,
window = window,
length=args.length, sample_rate=args.sample_rate,
keep_feat=args.keep_feat, batch_size=args.boxes_num_per_batch)
# add ground truth targets if annotation file is given
if args.annot_file is not None:
annot_proto = proto_load(args.annot_file)
add_track_targets(track_proto, annot_proto)
if args.zip:
save_track_proto_to_zip(track_proto, args.save_file)
else:
proto_dump(track_proto, args.save_file)
|
mit
| 6,455,828,710,421,682,000
| 37.802469
| 92
| 0.592587
| false
| 3.598168
| false
| false
| false
|
TomasTomecek/osbs
|
osbs/exceptions.py
|
1
|
1766
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Exceptions raised by OSBS
"""
from traceback import format_tb
class OsbsException(Exception):
def __init__(self, message=None, cause=None, traceback=None):
if message is None and cause is not None:
message = repr(cause)
super(OsbsException, self).__init__(message)
self.message = message
self.cause = cause
self.traceback = traceback
def __str__(self):
if self.cause and self.traceback and not hasattr(self, '__context__'):
return ("%s\n\n" % self.message +
"Original traceback (most recent call last):\n" +
"".join(format_tb(self.traceback)) +
"%r" % self.cause)
else:
return super(OsbsException, self).__str__()
def __repr__(self):
if self.cause and not hasattr(self, '__context__'):
return "OsbsException caused by %r" % self.cause
else:
return super(OsbsException, self).__repr__()
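# Illustrative use (the caller code is hypothetical): wrap a lower-level error so
# the original traceback is preserved on Python 2, e.g.
#   import sys
#   try:
#       do_request()
#   except Exception as ex:
#       raise OsbsException(cause=ex, traceback=sys.exc_info()[2])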
class OsbsResponseException(OsbsException):
""" OpenShift didn't respond with OK (200) status """
def __init__(self, message, status_code, *args, **kwargs):
super(OsbsResponseException, self).__init__(message, *args, **kwargs)
self.status_code = status_code
class OsbsNetworkException(OsbsException):
def __init__(self, url, message, status_code, *args, **kwargs):
super(OsbsNetworkException, self).__init__(message, *args, **kwargs)
self.url = url
self.status_code = status_code
class OsbsValidationException(OsbsException):
pass
|
bsd-3-clause
| -7,734,197,998,496,036,000
| 29.982456
| 78
| 0.61778
| false
| 4.031963
| false
| false
| false
|
baderj/domain_generation_algorithms
|
proslikefan/dga.py
|
1
|
1425
|
import argparse
from ctypes import c_int
from datetime import datetime
def dga(date, magic, tlds):
# tlds = ["eu", "biz", "se", "info", "com", "net", "org", "ru", "in",
# "name"]
for i in range(10):
for tld in tlds:
seed_string = '.'.join([str(s) for s in
[magic, date.month, date.day, date.year, tld]])
r = abs(hash_string(seed_string)) + i
domain = ""
k = 0
while(k < r % 7 + 6):
r = abs(hash_string(domain + str(r)))
domain += chr(r % 26 + ord('a'))
k += 1
domain += '.' + tld
print(domain)
def hash_string(s):
h = c_int(0)
for c in s:
h.value = (h.value << 5) - h.value + ord(c)
return h.value
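# hash_string mirrors Java's String.hashCode on 32-bit ints, e.g. (illustrative):
#   hash_string("a") == 97 and hash_string("ab") == 97 * 31 + 98 == 3105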
if __name__=="__main__":
""" known magic seeds are "prospect" and "OK" """
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--date", help="date for which to generate domains")
parser.add_argument("-m", "--magic", help="magic string",
default="prospect")
parser.add_argument("-t", "--tlds", nargs="+", help="tlds",
default=["eu", "biz", "se", "info", "com", "net", "org", "ru", "in", "name"])
args = parser.parse_args()
if args.date:
d = datetime.strptime(args.date, "%Y-%m-%d")
else:
d = datetime.now()
dga(d, args.magic, args.tlds)
|
gpl-2.0
| -7,406,160,187,670,002,000
| 32.139535
| 85
| 0.490526
| false
| 3.275862
| false
| false
| false
|
fujicoin/fujicoin
|
test/functional/wallet_listreceivedby.py
|
1
|
8103
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listreceivedbyaddress RPC."""
from decimal import Decimal
from test_framework.test_framework import FujicoinTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
assert_raises_rpc_error,
)
from test_framework.wallet_util import test_address
class ReceivedByTest(FujicoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_cli()
def run_test(self):
# Generate block to get out of IBD
self.nodes[0].generate(1)
self.sync_blocks()
# save the number of coinbase reward addresses so far
num_cb_reward_addresses = len(self.nodes[1].listreceivedbyaddress(minconf=0, include_empty=True, include_watchonly=True))
self.log.info("listreceivedbyaddress Test")
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# Check not listed in listreceivedbyaddress because has 0 confirmations
assert_array_result(self.nodes[1].listreceivedbyaddress(),
{"address": addr},
{},
True)
# Bury Tx under 10 block so it will be returned by listreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
assert_array_result(self.nodes[1].listreceivedbyaddress(),
{"address": addr},
{"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
        # With a minimum number of confirmations < 10
assert_array_result(self.nodes[1].listreceivedbyaddress(5),
{"address": addr},
{"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
        # With a minimum number of confirmations > 10, should not find Tx
assert_array_result(self.nodes[1].listreceivedbyaddress(11), {"address": addr}, {}, True)
# Empty Tx
empty_addr = self.nodes[1].getnewaddress()
assert_array_result(self.nodes[1].listreceivedbyaddress(0, True),
{"address": empty_addr},
{"address": empty_addr, "label": "", "amount": 0, "confirmations": 0, "txids": []})
# Test Address filtering
# Only on addr
expected = {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}
res = self.nodes[1].listreceivedbyaddress(minconf=0, include_empty=True, include_watchonly=True, address_filter=addr)
assert_array_result(res, {"address": addr}, expected)
assert_equal(len(res), 1)
# Test for regression on CLI calls with address string (#14173)
cli_res = self.nodes[1].cli.listreceivedbyaddress(0, True, True, addr)
assert_array_result(cli_res, {"address": addr}, expected)
assert_equal(len(cli_res), 1)
# Error on invalid address
assert_raises_rpc_error(-4, "address_filter parameter was invalid", self.nodes[1].listreceivedbyaddress, minconf=0, include_empty=True, include_watchonly=True, address_filter="bamboozling")
# Another address receive money
res = self.nodes[1].listreceivedbyaddress(0, True, True)
assert_equal(len(res), 2 + num_cb_reward_addresses) # Right now 2 entries
other_addr = self.nodes[1].getnewaddress()
txid2 = self.nodes[0].sendtoaddress(other_addr, 0.1)
self.nodes[0].generate(1)
self.sync_all()
# Same test as above should still pass
expected = {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 11, "txids": [txid, ]}
res = self.nodes[1].listreceivedbyaddress(0, True, True, addr)
assert_array_result(res, {"address": addr}, expected)
assert_equal(len(res), 1)
# Same test as above but with other_addr should still pass
expected = {"address": other_addr, "label": "", "amount": Decimal("0.1"), "confirmations": 1, "txids": [txid2, ]}
res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr)
assert_array_result(res, {"address": other_addr}, expected)
assert_equal(len(res), 1)
# Should be two entries though without filter
res = self.nodes[1].listreceivedbyaddress(0, True, True)
assert_equal(len(res), 3 + num_cb_reward_addresses) # Became 3 entries
# Not on random addr
other_addr = self.nodes[0].getnewaddress() # note on node[0]! just a random addr
res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr)
assert_equal(len(res), 0)
self.log.info("getreceivedbyaddress Test")
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# Check balance is 0 because of 0 confirmations
balance = self.nodes[1].getreceivedbyaddress(addr)
assert_equal(balance, Decimal("0.0"))
# Check balance is 0.1
balance = self.nodes[1].getreceivedbyaddress(addr, 0)
assert_equal(balance, Decimal("0.1"))
# Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
balance = self.nodes[1].getreceivedbyaddress(addr)
assert_equal(balance, Decimal("0.1"))
# Trying to getreceivedby for an address the wallet doesn't own should return an error
assert_raises_rpc_error(-4, "Address not found in wallet", self.nodes[0].getreceivedbyaddress, addr)
self.log.info("listreceivedbylabel + getreceivedbylabel Test")
# set pre-state
label = ''
address = self.nodes[1].getnewaddress()
test_address(self.nodes[1], address, labels=[label])
received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel() if r["label"] == label][0]
balance_by_label = self.nodes[1].getreceivedbylabel(label)
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# listreceivedbylabel should return received_by_label_json because of 0 confirmations
assert_array_result(self.nodes[1].listreceivedbylabel(),
{"label": label},
received_by_label_json)
# getreceivedbyaddress should return same balance because of 0 confirmations
balance = self.nodes[1].getreceivedbylabel(label)
assert_equal(balance, balance_by_label)
self.nodes[1].generate(10)
self.sync_all()
# listreceivedbylabel should return updated received list
assert_array_result(self.nodes[1].listreceivedbylabel(),
{"label": label},
{"label": received_by_label_json["label"], "amount": (received_by_label_json["amount"] + Decimal("0.1"))})
# getreceivedbylabel should return updated receive total
balance = self.nodes[1].getreceivedbylabel(label)
assert_equal(balance, balance_by_label + Decimal("0.1"))
# Create a new label named "mynewlabel" that has a 0 balance
address = self.nodes[1].getnewaddress()
self.nodes[1].setlabel(address, "mynewlabel")
received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel(0, True) if r["label"] == "mynewlabel"][0]
# Test includeempty of listreceivedbylabel
assert_equal(received_by_label_json["amount"], Decimal("0.0"))
# Test getreceivedbylabel for 0 amount labels
balance = self.nodes[1].getreceivedbylabel("mynewlabel")
assert_equal(balance, Decimal("0.0"))
if __name__ == '__main__':
ReceivedByTest().main()
|
mit
| -8,886,061,812,897,744,000
| 46.385965
| 197
| 0.623473
| false
| 3.8549
| true
| false
| false
|
maoy/zknova
|
nova/api/openstack/compute/contrib/security_groups.py
|
1
|
21752
|
# Copyright 2011 OpenStack LLC.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The security groups extension."""
from xml.dom import minidom
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import api as compute_api
from nova import db
from nova import exception
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')
def make_rule(elem):
elem.set('id')
elem.set('parent_group_id')
proto = xmlutil.SubTemplateElement(elem, 'ip_protocol')
proto.text = 'ip_protocol'
from_port = xmlutil.SubTemplateElement(elem, 'from_port')
from_port.text = 'from_port'
to_port = xmlutil.SubTemplateElement(elem, 'to_port')
to_port.text = 'to_port'
group = xmlutil.SubTemplateElement(elem, 'group', selector='group')
name = xmlutil.SubTemplateElement(group, 'name')
name.text = 'name'
tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id')
tenant_id.text = 'tenant_id'
ip_range = xmlutil.SubTemplateElement(elem, 'ip_range',
selector='ip_range')
cidr = xmlutil.SubTemplateElement(ip_range, 'cidr')
cidr.text = 'cidr'
def make_sg(elem):
elem.set('id')
elem.set('tenant_id')
elem.set('name')
desc = xmlutil.SubTemplateElement(elem, 'description')
desc.text = 'description'
rules = xmlutil.SubTemplateElement(elem, 'rules')
rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules')
make_rule(rule)
sg_nsmap = {None: wsgi.XMLNS_V11}
class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_group_rule',
selector='security_group_rule')
make_rule(root)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_group',
selector='security_group')
make_sg(root)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_groups')
elem = xmlutil.SubTemplateElement(root, 'security_group',
selector='security_groups')
make_sg(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
Deserializer to handle xml-formatted security group requests.
"""
def default(self, string):
"""Deserialize an xml-formatted security group create request."""
dom = minidom.parseString(string)
security_group = {}
sg_node = self.find_first_child_named(dom,
'security_group')
if sg_node is not None:
if sg_node.hasAttribute('name'):
security_group['name'] = sg_node.getAttribute('name')
desc_node = self.find_first_child_named(sg_node,
"description")
if desc_node:
security_group['description'] = self.extract_text(desc_node)
return {'body': {'security_group': security_group}}
class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
Deserializer to handle xml-formatted security group requests.
"""
def default(self, string):
"""Deserialize an xml-formatted security group create request."""
dom = minidom.parseString(string)
security_group_rule = self._extract_security_group_rule(dom)
return {'body': {'security_group_rule': security_group_rule}}
def _extract_security_group_rule(self, node):
"""Marshal the security group rule attribute of a parsed request."""
sg_rule = {}
sg_rule_node = self.find_first_child_named(node,
'security_group_rule')
if sg_rule_node is not None:
ip_protocol_node = self.find_first_child_named(sg_rule_node,
"ip_protocol")
if ip_protocol_node is not None:
sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node)
from_port_node = self.find_first_child_named(sg_rule_node,
"from_port")
if from_port_node is not None:
sg_rule['from_port'] = self.extract_text(from_port_node)
to_port_node = self.find_first_child_named(sg_rule_node, "to_port")
if to_port_node is not None:
sg_rule['to_port'] = self.extract_text(to_port_node)
parent_group_id_node = self.find_first_child_named(sg_rule_node,
"parent_group_id")
if parent_group_id_node is not None:
sg_rule['parent_group_id'] = self.extract_text(
parent_group_id_node)
group_id_node = self.find_first_child_named(sg_rule_node,
"group_id")
if group_id_node is not None:
sg_rule['group_id'] = self.extract_text(group_id_node)
cidr_node = self.find_first_child_named(sg_rule_node, "cidr")
if cidr_node is not None:
sg_rule['cidr'] = self.extract_text(cidr_node)
return sg_rule
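    # An accepted request body looks roughly like the following (illustrative values):
    #   <security_group_rule>
    #     <ip_protocol>tcp</ip_protocol><from_port>22</from_port><to_port>22</to_port>
    #     <parent_group_id>1</parent_group_id><cidr>10.0.0.0/24</cidr>
    #   </security_group_rule>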
class SecurityGroupControllerBase(object):
"""Base class for Security Group controllers."""
def __init__(self):
self.security_group_api = NativeSecurityGroupAPI()
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _format_security_group_rule(self, context, rule):
sg_rule = {}
sg_rule['id'] = rule['id']
sg_rule['parent_group_id'] = rule['parent_group_id']
sg_rule['ip_protocol'] = rule['protocol']
sg_rule['from_port'] = rule['from_port']
sg_rule['to_port'] = rule['to_port']
sg_rule['group'] = {}
sg_rule['ip_range'] = {}
if rule['group_id']:
source_group = self.security_group_api.get(context,
id=rule['group_id'])
sg_rule['group'] = {'name': source_group.name,
'tenant_id': source_group.project_id}
else:
sg_rule['ip_range'] = {'cidr': rule['cidr']}
return sg_rule
def _format_security_group(self, context, group):
security_group = {}
security_group['id'] = group['id']
security_group['description'] = group['description']
security_group['name'] = group['name']
security_group['tenant_id'] = group['project_id']
security_group['rules'] = []
for rule in group['rules']:
security_group['rules'] += [self._format_security_group_rule(
context, rule)]
return security_group
def _authorize_context(self, req):
context = req.environ['nova.context']
authorize(context)
return context
def _validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
raise exc.HTTPBadRequest(explanation=msg)
def _from_body(self, body, key):
if not body:
raise exc.HTTPUnprocessableEntity()
value = body.get(key, None)
if value is None:
raise exc.HTTPUnprocessableEntity()
return value
class SecurityGroupController(SecurityGroupControllerBase):
"""The Security group API controller for the OpenStack API."""
@wsgi.serializers(xml=SecurityGroupTemplate)
def show(self, req, id):
"""Return data about the given security group."""
context = self._authorize_context(req)
id = self._validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
return {'security_group': self._format_security_group(context,
security_group)}
def delete(self, req, id):
"""Delete a security group."""
context = self._authorize_context(req)
id = self._validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
self.security_group_api.destroy(context, security_group)
return webob.Response(status_int=202)
@wsgi.serializers(xml=SecurityGroupsTemplate)
def index(self, req):
"""Returns a list of security groups."""
context = self._authorize_context(req)
search_opts = {}
search_opts.update(req.GET)
raw_groups = self.security_group_api.list(context,
project=context.project_id,
search_opts=search_opts)
limited_list = common.limited(raw_groups, req)
result = [self._format_security_group(context, group)
for group in limited_list]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
@wsgi.serializers(xml=SecurityGroupTemplate)
@wsgi.deserializers(xml=SecurityGroupXMLDeserializer)
def create(self, req, body):
"""Creates a new security group."""
context = self._authorize_context(req)
security_group = self._from_body(body, 'security_group')
group_name = security_group.get('name', None)
group_description = security_group.get('description', None)
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.create(context, group_name,
group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase):
@wsgi.serializers(xml=SecurityGroupRuleTemplate)
@wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer)
def create(self, req, body):
context = self._authorize_context(req)
sg_rule = self._from_body(body, 'security_group_rule')
parent_group_id = self._validate_id(sg_rule.get('parent_group_id',
None))
security_group = self.security_group_api.get(context, None,
parent_group_id, map_exception=True)
try:
values = self._rule_args_to_dict(context,
to_port=sg_rule.get('to_port'),
from_port=sg_rule.get('from_port'),
ip_protocol=sg_rule.get('ip_protocol'),
cidr=sg_rule.get('cidr'),
group_id=sg_rule.get('group_id'))
except Exception as exp:
raise exc.HTTPBadRequest(explanation=unicode(exp))
if values is None:
msg = _("Not enough parameters to build a valid rule.")
raise exc.HTTPBadRequest(explanation=msg)
values['parent_group_id'] = security_group.id
if self.security_group_api.rule_exists(security_group, values):
msg = _('This rule already exists in group %s') % parent_group_id
raise exc.HTTPBadRequest(explanation=msg)
security_group_rule = self.security_group_api.add_rules(
context, parent_group_id, security_group['name'], [values])[0]
return {"security_group_rule": self._format_security_group_rule(
context,
security_group_rule)}
def _rule_args_to_dict(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr=None, group_id=None):
if group_id is not None:
group_id = self._validate_id(group_id)
#check if groupId exists
self.security_group_api.get(context, id=group_id)
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
def delete(self, req, id):
context = self._authorize_context(req)
id = self._validate_id(id)
rule = self.security_group_api.get_rule(context, id)
group_id = rule.parent_group_id
security_group = self.security_group_api.get(context, None, group_id,
map_exception=True)
self.security_group_api.remove_rules(context, security_group,
[rule['id']])
return webob.Response(status_int=202)
class ServerSecurityGroupController(SecurityGroupControllerBase):
@wsgi.serializers(xml=SecurityGroupsTemplate)
def index(self, req, server_id):
"""Returns a list of security groups for the given instance."""
context = self._authorize_context(req)
self.security_group_api.ensure_default(context)
try:
instance = self.compute_api.get(context, server_id)
except exception.InstanceNotFound as exp:
raise exc.HTTPNotFound(explanation=unicode(exp))
groups = db.security_group_get_by_instance(context, instance['id'])
result = [self._format_security_group(context, group)
for group in groups]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
class SecurityGroupActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupActionController, self).__init__(*args, **kwargs)
self.security_group_api = NativeSecurityGroupAPI()
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _parse(self, body, action):
try:
body = body[action]
group_name = body['name']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Security group not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
if not group_name or group_name.strip() == '':
msg = _("Security group name cannot be empty")
raise webob.exc.HTTPBadRequest(explanation=msg)
return group_name
def _invoke(self, method, context, id, group_name):
try:
instance = self.compute_api.get(context, id)
method(context, instance, group_name)
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=unicode(exp))
except exception.InstanceNotFound as exp:
raise exc.HTTPNotFound(explanation=unicode(exp))
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=unicode(exp))
return webob.Response(status_int=202)
@wsgi.action('addSecurityGroup')
def _addSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'addSecurityGroup')
return self._invoke(self.security_group_api.add_to_instance,
context, id, group_name)
@wsgi.action('removeSecurityGroup')
def _removeSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'removeSecurityGroup')
return self._invoke(self.security_group_api.remove_from_instance,
context, id, group_name)
class SecurityGroupsOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _extend_servers(self, req, servers):
key = "security_groups"
for server in servers:
instance = req.get_db_instance(server['id'])
groups = instance.get(key)
if groups:
server[key] = [{"name": group["name"]} for group in groups]
def _show(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
if 'server' in resp_obj.obj:
resp_obj.attach(xml=SecurityGroupServerTemplate())
self._extend_servers(req, [resp_obj.obj['server']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
resp_obj.attach(xml=SecurityGroupServersTemplate())
self._extend_servers(req, list(resp_obj.obj['servers']))
class SecurityGroupsTemplateElement(xmlutil.TemplateElement):
def will_render(self, datum):
return "security_groups" in datum
def make_server(elem):
secgrps = SecurityGroupsTemplateElement('security_groups')
elem.append(secgrps)
secgrp = xmlutil.SubTemplateElement(secgrps, 'security_group',
selector="security_groups")
secgrp.set('name')
class SecurityGroupServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
make_server(root)
return xmlutil.SlaveTemplate(root, 1)
class SecurityGroupServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
return xmlutil.SlaveTemplate(root, 1)
class Security_groups(extensions.ExtensionDescriptor):
"""Security group support."""
name = "SecurityGroups"
alias = "os-security-groups"
namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
updated = "2011-07-21T00:00:00+00:00"
def get_controller_extensions(self):
controller = SecurityGroupActionController()
actions = extensions.ControllerExtension(self, 'servers', controller)
controller = SecurityGroupsOutputController()
output = extensions.ControllerExtension(self, 'servers', controller)
return [actions, output]
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-security-groups',
controller=SecurityGroupController())
resources.append(res)
res = extensions.ResourceExtension('os-security-group-rules',
controller=SecurityGroupRulesController())
resources.append(res)
res = extensions.ResourceExtension(
'os-security-groups',
controller=ServerSecurityGroupController(),
parent=dict(member_name='server', collection_name='servers'))
resources.append(res)
return resources
class NativeSecurityGroupAPI(compute_api.SecurityGroupAPI):
@staticmethod
def raise_invalid_property(msg):
raise exc.HTTPBadRequest(explanation=msg)
@staticmethod
def raise_group_already_exists(msg):
raise exc.HTTPBadRequest(explanation=msg)
@staticmethod
def raise_invalid_group(msg):
raise exc.HTTPBadRequest(explanation=msg)
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
raise exception.InvalidCidr(cidr=cidr)
@staticmethod
def raise_over_quota(msg):
raise exception.SecurityGroupLimitExceeded(msg)
@staticmethod
def raise_not_found(msg):
raise exc.HTTPNotFound(explanation=msg)
|
apache-2.0
| 8,453,430,313,926,961,000
| 36.37457
| 79
| 0.594796
| false
| 4.262591
| false
| false
| false
|
vaidap/zulip
|
zerver/views/home.py
|
1
|
11809
|
from __future__ import absolute_import
from typing import Any, List, Dict, Optional, Text
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, HttpRequest
from django.shortcuts import redirect, render
from django.utils import translation
from django.utils.cache import patch_cache_control
from six.moves import zip_longest, zip, range
from zerver.decorator import zulip_login_required, process_client
from zerver.forms import ToSForm
from zerver.lib.realm_icon import realm_icon_url
from zerver.models import Message, UserProfile, Stream, Subscription, Huddle, \
Recipient, Realm, UserMessage, DefaultStream, RealmEmoji, RealmDomain, \
RealmFilter, PreregistrationUser, UserActivity, \
UserPresence, get_recipient, name_changes_disabled, email_to_username, \
get_realm_domains
from zerver.lib.events import do_events_register
from zerver.lib.actions import update_user_presence, do_change_tos_version, \
do_update_pointer, realm_user_count
from zerver.lib.avatar import avatar_url
from zerver.lib.i18n import get_language_list, get_language_name, \
get_language_list_for_templates
from zerver.lib.push_notifications import num_push_devices_for_user
from zerver.lib.streams import access_stream_by_name
from zerver.lib.utils import statsd, get_subdomain
import calendar
import datetime
import logging
import os
import re
import simplejson
import time
@zulip_login_required
def accounts_accept_terms(request):
# type: (HttpRequest) -> HttpResponse
if request.method == "POST":
form = ToSForm(request.POST)
if form.is_valid():
do_change_tos_version(request.user, settings.TOS_VERSION)
return redirect(home)
else:
form = ToSForm()
email = request.user.email
special_message_template = None
if request.user.tos_version is None and settings.FIRST_TIME_TOS_TEMPLATE is not None:
special_message_template = 'zerver/' + settings.FIRST_TIME_TOS_TEMPLATE
return render(
request,
'zerver/accounts_accept_terms.html',
context={'form': form,
'email': email,
'special_message_template': special_message_template},
)
def sent_time_in_epoch_seconds(user_message):
# type: (Optional[UserMessage]) -> Optional[float]
if user_message is None:
return None
# We have USE_TZ = True, so our datetime objects are timezone-aware.
# Return the epoch seconds in UTC.
return calendar.timegm(user_message.message.pub_date.utctimetuple())
def home(request):
# type: (HttpRequest) -> HttpResponse
if settings.DEVELOPMENT and os.path.exists('var/handlebars-templates/compile.error'):
response = render(request, 'zerver/handlebars_compilation_failed.html')
response.status_code = 500
return response
if not settings.SUBDOMAINS_HOMEPAGE:
return home_real(request)
# If settings.SUBDOMAINS_HOMEPAGE, sends the user the landing
# page, not the login form, on the root domain
subdomain = get_subdomain(request)
if subdomain != "":
return home_real(request)
return render(request, 'zerver/hello.html')
@zulip_login_required
def home_real(request):
# type: (HttpRequest) -> HttpResponse
# We need to modify the session object every two weeks or it will expire.
# This line makes reloading the page a sufficient action to keep the
# session alive.
request.session.modified = True
user_profile = request.user
# If a user hasn't signed the current Terms of Service, send them there
if settings.TERMS_OF_SERVICE is not None and settings.TOS_VERSION is not None and \
int(settings.TOS_VERSION.split('.')[0]) > user_profile.major_tos_version():
return accounts_accept_terms(request)
narrow = [] # type: List[List[Text]]
narrow_stream = None
narrow_topic = request.GET.get("topic")
if request.GET.get("stream"):
try:
narrow_stream_name = request.GET.get("stream")
(narrow_stream, ignored_rec, ignored_sub) = access_stream_by_name(
user_profile, narrow_stream_name)
narrow = [["stream", narrow_stream.name]]
except Exception:
logging.exception("Narrow parsing")
if narrow_stream is not None and narrow_topic is not None:
narrow.append(["topic", narrow_topic])
register_ret = do_events_register(user_profile, request.client,
apply_markdown=True, narrow=narrow)
user_has_messages = (register_ret['max_message_id'] != -1)
# Reset our don't-spam-users-with-email counter since the
# user has since logged in
if user_profile.last_reminder is not None:
user_profile.last_reminder = None
user_profile.save(update_fields=["last_reminder"])
# Brand new users get the tutorial
needs_tutorial = settings.TUTORIAL_ENABLED and \
user_profile.tutorial_status != UserProfile.TUTORIAL_FINISHED
first_in_realm = realm_user_count(user_profile.realm) == 1
# If you are the only person in the realm and you didn't invite
# anyone, we'll continue to encourage you to do so on the frontend.
prompt_for_invites = first_in_realm and \
not PreregistrationUser.objects.filter(referred_by=user_profile).count()
if user_profile.pointer == -1 and user_has_messages:
# Put the new user's pointer at the bottom
#
# This improves performance, because we limit backfilling of messages
# before the pointer. It's also likely that someone joining an
# organization is interested in recent messages more than the very
# first messages on the system.
register_ret['pointer'] = register_ret['max_message_id']
user_profile.last_pointer_updater = request.session.session_key
if user_profile.pointer == -1:
latest_read = None
else:
try:
latest_read = UserMessage.objects.get(user_profile=user_profile,
message__id=user_profile.pointer)
except UserMessage.DoesNotExist:
# Don't completely fail if your saved pointer ID is invalid
logging.warning("%s has invalid pointer %s" % (user_profile.email, user_profile.pointer))
latest_read = None
# Set default language and make it persist
default_language = register_ret['default_language']
url_lang = '/{}'.format(request.LANGUAGE_CODE)
if not request.path.startswith(url_lang):
translation.activate(default_language)
request.session[translation.LANGUAGE_SESSION_KEY] = default_language
# Pass parameters to the client-side JavaScript code.
# These end up in a global JavaScript Object named 'page_params'.
page_params = dict(
# Server settings.
development_environment = settings.DEVELOPMENT,
debug_mode = settings.DEBUG,
test_suite = settings.TEST_SUITE,
poll_timeout = settings.POLL_TIMEOUT,
login_page = settings.HOME_NOT_LOGGED_IN,
server_uri = settings.SERVER_URI,
maxfilesize = settings.MAX_FILE_UPLOAD_SIZE,
max_avatar_file_size = settings.MAX_AVATAR_FILE_SIZE,
server_generation = settings.SERVER_GENERATION,
use_websockets = settings.USE_WEBSOCKETS,
save_stacktraces = settings.SAVE_FRONTEND_STACKTRACES,
server_inline_image_preview = settings.INLINE_IMAGE_PREVIEW,
server_inline_url_embed_preview = settings.INLINE_URL_EMBED_PREVIEW,
password_min_length = settings.PASSWORD_MIN_LENGTH,
password_min_quality = settings.PASSWORD_MIN_ZXCVBN_QUALITY,
# Misc. extra data.
have_initial_messages = user_has_messages,
initial_servertime = time.time(), # Used for calculating relative presence age
default_language_name = get_language_name(register_ret['default_language']),
language_list_dbl_col = get_language_list_for_templates(register_ret['default_language']),
language_list = get_language_list(),
needs_tutorial = needs_tutorial,
first_in_realm = first_in_realm,
prompt_for_invites = prompt_for_invites,
furthest_read_time = sent_time_in_epoch_seconds(latest_read),
has_mobile_devices = num_push_devices_for_user(user_profile) > 0,
)
undesired_register_ret_fields = [
'streams',
]
for field_name in set(register_ret.keys()) - set(undesired_register_ret_fields):
page_params[field_name] = register_ret[field_name]
if narrow_stream is not None:
# In narrow_stream context, initial pointer is just latest message
recipient = get_recipient(Recipient.STREAM, narrow_stream.id)
try:
initial_pointer = Message.objects.filter(recipient=recipient).order_by('id').reverse()[0].id
except IndexError:
initial_pointer = -1
page_params["narrow_stream"] = narrow_stream.name
if narrow_topic is not None:
page_params["narrow_topic"] = narrow_topic
page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
page_params["max_message_id"] = initial_pointer
page_params["pointer"] = initial_pointer
page_params["have_initial_messages"] = (initial_pointer != -1)
page_params["enable_desktop_notifications"] = False
statsd.incr('views.home')
show_invites = True
# Some realms only allow admins to invite users
if user_profile.realm.invite_by_admins_only and not user_profile.is_realm_admin:
show_invites = False
request._log_data['extra'] = "[%s]" % (register_ret["queue_id"],)
response = render(request, 'zerver/index.html',
context={'user_profile': user_profile,
'page_params': simplejson.encoder.JSONEncoderForHTML().encode(page_params),
'nofontface': is_buggy_ua(request.META.get("HTTP_USER_AGENT", "Unspecified")),
'avatar_url': avatar_url(user_profile),
'show_debug':
settings.DEBUG and ('show_debug' in request.GET),
'pipeline': settings.PIPELINE_ENABLED,
'show_invites': show_invites,
'is_admin': user_profile.is_realm_admin,
'show_webathena': user_profile.realm.webathena_enabled,
'enable_feedback': settings.ENABLE_FEEDBACK,
'embedded': narrow_stream is not None,
},)
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
return response
@zulip_login_required
def desktop_home(request):
# type: (HttpRequest) -> HttpResponse
return HttpResponseRedirect(reverse('zerver.views.home.home'))
def apps_view(request, _):
# type: (HttpRequest, Text) -> HttpResponse
if settings.ZILENCER_ENABLED:
return render(request, 'zerver/apps.html')
return HttpResponseRedirect('https://zulipchat.com/apps/', status=301)
def is_buggy_ua(agent):
# type: (str) -> bool
"""Discrimiate CSS served to clients based on User Agent
Due to QTBUG-3467, @font-face is not supported in QtWebKit.
This may get fixed in the future, but for right now we can
just serve the more conservative CSS to all our desktop apps.
"""
return ("Humbug Desktop/" in agent or "Zulip Desktop/" in agent or "ZulipDesktop/" in agent) and \
"Mac" not in agent
|
apache-2.0
| -5,072,437,405,447,221,000
| 43.394737
| 109
| 0.65594
| false
| 3.981457
| true
| false
| false
|