| repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (string, 19 distinct values) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (string, 15 distinct values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Southpaw-TACTIC/TACTIC | src/pyasm/web/palette.py | 1 | 13401 |
###########################################################
#
# Copyright (c) 2010, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['Palette']
from pyasm.common import Container, Config, Common
from pyasm.search import Search
import colorsys, types
class Palette(object):
# default color palette
DEFAULT = {
'color': '#AAA', # main font color
'color2': '#BBB', # secondary font color
'color3': '#222222', # tertiary font color
'background': '#444444', # main background color
'background2': '#2F2F2F', # secondary background color
'background3': '#777777', # tertiary background color
'border': '#737b79', # main border color
'shadow': '#000000', # main shadow color
'theme': 'dark',
'table_border': '#494949',
'side_bar_title': '#3C76C2',
}
DARK = DEFAULT
BLACK = {
'color': '#AAA', # main font color
'color2': '#AAA', # secondary font color
'color3': '#AAA', # tertiary font color
'background': '#101010', # main background color
'background2': '#100000', # secondary background color
'background3': '#000000', # tertiary background color
'border': '#202020', # main border color
'shadow': '#202020', # main shadow color
'theme': 'dark',
'table_border': '#202020',
'side_bar_title': '#3C76C2',
}
AQUA = {
'color': '#000', # main font color
'color2': '#333', # secondary font color
'color3': '#333', # tertiary font color
'background': '#FFFFFF', # main background color
'background2': '#BBBBBB', # secondary background color
'background3': '#D1D7E2', # tertiary background color
'border': '#BBB', # main border color
'side_bar_title': '#3C76C2',
'side_bar_title_color': '#FFF',
'tab_background': '#3C76C2',
'table_border': '#E0E0E0',
'theme': 'default',
'shadow': 'rgba(0,0,0,0.1)',
}
# silver theme
SILVER = {
'color': '#000', # main font color
'color2': '#333', # secondary font color
'color3': '#333', # tertiary font color
'background': '#DDDDDD', # main background color
'background2': '#777777', # secondary background color
'background3': '#999999', # tertiary background color
'border': '#888888', # main border color
'table_border': '#DDD',
'theme': 'default',
'shadow': 'rgba(0,0,0,0.6)',
'side_bar_title': '#3C76C2',
}
# silver theme
BRIGHT = {
'color': '#000', # main font color
'color2': '#333', # secondary font color
'color3': '#333', # tertiary font color
'background': '#FFFFFF', # main background color
'background2': '#AAAAAA', # secondary background color
'background3': '#EEEEEE', # tertiary background color
'border': '#BBBBBB', # main border color
'table_border': '#E0E0E0',
'theme': 'default',
'shadow': 'rgba(0,0,0,0.6)',
'side_bar_title': '#3C76C2',
}
# bon noche theme
BON_NOCHE = {
'color': '#FFF', # main font color
'color2': '#FFF', # secondary font color
'color3': '#FFF', # tertiary font color
'background': '#060719', # main background color
'background2': '#4C1B2F', # secondary background color
'background3': '#9E332E', # tertiary background color
'border': '#444', # main border color
'table_border': '#060719',
'theme': 'dark'
}
# origami theme
ORIGAMI = {
'color': '#000', # main font color
'color2': '#FFF', # secondary font color
'color3': '#000', # tertiary font color
'background': '#E8FAC8', # main background color
'background2': '#8C8015', # secondary background color
'background3': '#BAB966', # tertiary background color
'border': '#888888', # main border color
'table_border': '#E8FAC8',
'shadow': 'rgba(0,0,0,0.6)',
'theme': 'default'
}
MMS = {
'color': '#FFF', # main font color
'color2': '#000', # secondary font color
'color3': '#000', # tertiary font color
'background': '#00539F', # main background color
'background2': '#CCCCCC', # secondary background color
'background3': '#AAAAAA', # tertiary background color
'border': '#999999', # main border color
'table_border': '#00539F',
'theme': 'default'
}
AVIATOR = {
'color': '#000000', # main font color
'color2': '#FFFFFF', # secondary font color
'color3': '#FFFFFF', # tertiary font color
'background': '#E6D595', # main background color
'background2': '#1A9481', # secondary background color
'background3': '#003D5c', # tertiary background color
'border': '#666666', # main border color
'table_border': '#E6D595',
'theme': 'dark'
}
#COLORS = DEFAULT
#COLORS = SILVER
#COLORS = ORIGAMI
COLORS = AQUA
#COLORS = BRIGHT
#COLORS = BON_NOCHE
#COLORS = MMS
#COLORS = AVIATOR
TABLE = {
'table_hilite': '#F00',
'table_select': '#FF0',
'table_changed': '#FFF',
'header_background': '#FFF'
}
def __init__(self, **kwargs):
self.kwargs = kwargs
self.colors = self.kwargs.get("colors")
palette = self.kwargs.get("palette")
if palette:
self.set_palette(palette)
else:
# look at the project
from pyasm.biz import Project
project = Project.get(no_exception=True)
if project:
value = project.get_value("palette")
self.set_palette(value)
# otherwise look at the user
if not self.colors:
from pyasm.biz import PrefSetting
value = PrefSetting.get_value_by_key("palette")
self.set_palette(value)
# look in the config
if not self.colors:
value = Config.get_value("look", "palette")
self.set_palette(value)
if not self.colors:
self.colors = self.COLORS
# make sure all of the colors are defined
for name, value in self.DEFAULT.items():
# make a special provision for theme!
if name == 'theme':
continue
if not self.colors.get(name):
self.colors[name] = value
def set_palette(self, palette):
value = palette
if not value:
return
try:
self.colors = eval(value)
# make sure all of the colors are defined
for name, value in self.DEFAULT.items():
# make a special provision for theme!
if name == 'theme':
continue
if not self.colors.get(name):
self.colors[name] = value
except:
try:
value = value.upper()
value = value.replace(" ", "_")
self.colors = eval("self.%s" % value)
except:
print("WARNING: palette [%s] does not exist. Using default" % value)
def get_theme(self):
theme = self.colors.get("theme")
if not theme:
theme = "default"
return theme
def get_keys(self):
return self.colors.keys()
def get_colors(self):
return self.colors
def color(self, category, modifier=0, default=None):
if not category:
category = 'background'
# make default adjustments
if category.startswith("#"):
color = category
category = "color"
else:
color = self.colors.get(category)
if not color:
color = self.colors.get(default)
if not color:
color = category
if category == 'background2' and not color:
category = 'background'
modifier += 10
color = self.colors.get(category)
if category == 'color2' and not color:
category = 'color'
modifier += 10
color = self.colors.get(category)
return Common.modify_color(color, modifier)
def modify_color(color, modifier):
return Common.modify_color(color, modifier)
modify_color = staticmethod(modify_color)
"""
if not modifier:
return color
if not color:
return None
color = color.replace("#", '')
if len(color) == 3:
first = "%s%s" % (color[0], color[0])
second = "%s%s" % (color[1], color[1])
third = "%s%s" % (color[2], color[2])
elif len(color) == 6:
first = "%s" % color[0:2]
second = "%s" % color[2:4]
third = "%s" % color[4:6]
first = float(int(first, 16) ) / 256
second = float(int(second, 16) ) / 256
third = float(int(third, 16) ) / 256
if type(modifier) == types.ListType:
rgb = []
rgb.append( 0.01*modifier[0] + first )
rgb.append( 0.01*modifier[1] + second )
rgb.append( 0.01*modifier[2] + third )
else:
hsv = colorsys.rgb_to_hsv(first, second, third)
value = 0.01*modifier + hsv[2]
if value < 0:
value = 0
if value > 1:
value = 1
hsv = (hsv[0], hsv[1], value )
rgb = colorsys.hsv_to_rgb(*hsv)
first = hex(int(rgb[0]*256))[2:]
if len(first) == 1:
first = "0%s" % first
second = hex(int(rgb[1]*256))[2:]
if len(second) == 1:
second = "0%s" % second
third = hex(int(rgb[2]*256))[2:]
if len(third) == 1:
third = "0%s" % third
if len(first) == 3:
first = "FF"
if len(second) == 3:
second = "FF"
if len(third) == 3:
third = "FF"
color = "#%s%s%s" % (first, second, third)
return color
modify_color = staticmethod(modify_color)
"""
def gradient(self, palette_key, modifier=0, range=-20, reverse=False, default=None):
if modifier == None:
modifier = 0
if range == None:
range = -20
from .web_container import WebContainer
web = WebContainer.get_web()
palette = Palette.get()
if web.is_IE():
color = self.color(palette_key, (modifier+range)/2, default=default)
return color
else:
if not reverse:
color1 = self.color(palette_key, modifier, default=default)
color2 = self.color(palette_key, modifier+range, default=default)
else:
color2 = self.color(palette_key, modifier, default=default)
color1 = self.color(palette_key, modifier+range, default=default)
if web.get_browser() == 'Mozilla':
return "-moz-linear-gradient(top, %s, %s)" % (color1, color2)
else:
return "-webkit-gradient(linear, 0%% 0%%, 0%% 100%%, from(%s), to(%s))" % (color1, color2)
def push_palette(cls, palette):
palettes = Container.get("Palette:palettes")
if palettes == None:
palettes = []
Container.put("Palette:palettes", palettes)
palette = Palette(palette=palette)
palettes.append(palette)
push_palette = classmethod(push_palette)
def pop_palette(cls):
palettes = Container.get("Palette:palettes")
if palettes == None:
palettes = []
Container.put("Palette:palettes", palettes)
if len(palettes) == 0:
return None  # nothing to pop; avoid indexing an empty list
return palettes.pop()
pop_palette = classmethod(pop_palette)
def num_palettes(cls):
palettes = Container.get("Palette:palettes")
if palettes == None:
palettes = []
Container.put("Palette:palettes", palettes)
return len(palettes)
num_palettes = classmethod(num_palettes)
def get(cls):
palettes = Container.get("Palette:palettes")
if palettes == None:
palettes = []
Container.put("Palette:palettes", palettes)
if not palettes:
palette = Palette()
palettes.append(palette)
else:
palette = palettes[-1]
return palette
get = classmethod(get)
def set(cls, palette):
Container.put("Palette:palette", palette)
set = classmethod(set)
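# Illustrative sketch (not part of the original file): how the palette stack
# defined above is typically exercised. Values shown are hypothetical.
#
#   Palette.push_palette("BRIGHT")         # activate a named palette
#   palette = Palette.get()                # top of the stack, or a default Palette
#   bg = palette.color("background", -5)   # resolved hex color, slightly darkened
#   theme = palette.get_theme()            # 'default' for BRIGHT, 'dark' for BLACK
#   Palette.pop_palette()                  # restore the previous palette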
| epl-1.0 | 5,284,234,413,447,526,000 | 28.64823 | 106 | 0.50929 | false | 3.814688 | false | false | false |
| faber03/AndroidMalwareEvaluatingTools | framework sources/Alan/dalvikobfuscator/baksmali-modifier.py | 1 | 2307 |
#!/usr/bin/env python
# Copyright (C) 2012 pleed@dexlabs.org
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from pyparsing import *
InjectedCode = ["nop\n" for i in range(10)]
MethodToken = Literal(".method")
AccessFlag = Literal("public") | \
Literal("private") | \
Literal("protected")| \
Literal("abstract")| \
Literal("static")| \
Literal("constructor")| \
Literal("final")| \
Literal("native") | \
Literal("bridge") | \
Literal("synthetic") | \
Literal("native") | \
Literal("varargs") | \
Literal("declared-synchronized")
JavaType = Word(alphas+"[", alphanums +"_$[;/", min=1)
MethodName = Word(alphas+"$_<", alphanums+"_>$", min=1)
ArgList = JavaType
MethodProtoType = MethodName + Suppress("(") + Optional(ArgList) + Suppress(")") + JavaType
MethodDecl = Suppress(MethodToken) + ZeroOrMore(AccessFlag) + Suppress(MethodProtoType)
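# Illustrative parse (hypothetical input line, not part of the original file):
#   MethodDecl.parseString(".method public static main([Ljava/lang/String;)V")
# yields ['public', 'static']: the ".method" token and the method prototype are
# suppressed, leaving only the access flags that injectnops() inspects below.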
def injectnops(filename):
with open(filename, "r") as smalifile:
lines = smalifile.readlines()
modified = []
for index, line in enumerate(lines):
modified.append(line)
if line.startswith(".method"):
try:
flags = list(MethodDecl.parseString(line.strip("\n"),parseAll=True))
except Exception as e:
print line
raise e
if "abstract" not in flags and "native" not in flags:
modified += InjectedCode
with open(filename, "w") as smalifile:
smalifile.writelines(modified)
def run(directory):
for dirpath, dirnames, filenames in os.walk(directory):
for filename in filter(lambda x: x.endswith(".smali"), filenames):
injectnops(os.path.join(dirpath, filename))
def usage():
print "%s <directory>" % sys.argv[0]
print ""
print "inject nops into baksmali files"
if __name__ == "__main__":
if len(sys.argv) != 2:
usage()
else:
run(sys.argv[1])
| apache-2.0 | -1,676,058,413,938,096,400 | 28.576923 | 91 | 0.682271 | false | 3.348331 | false | false | false |
| lemmingapex/ProjectEuler | Problem018/src/MaximumPathSum.py | 1 | 1162 |
#!/usr/bin/python3
#
# 09/21/2016
# MaximumPathSum.py
# Maximum path sum I
# Maximum path sum II
#
# Scott Wiedemann
#
import sys
class MaximumPathSum:
_triangleData = []
def __init__(self, InputFile):
for line in InputFile:
self._triangleData.append([int(v) for v in line.split()])
return
def sumMaxPath(self):
maxPathData = [row[:] for row in self._triangleData]
i = len(maxPathData) - 2
while i >= 0:
#print(maxPathData[i])
j = len(maxPathData[i]) - 1
while j >= 0:
leftChild = maxPathData[i+1][j]
rightChild = maxPathData[i+1][j+1]
maxPathData[i][j] += max(leftChild, rightChild)
j-=1
i-=1
return maxPathData[0][0]
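# Worked example (the small triangle from the Project Euler 18 statement):
#       3
#      7 4
#     2 4 6
#    8 5 9 3
# Collapsing rows bottom-up: [2,4,6] -> [10,13,15], then [7,4] -> [20,19],
# then [3] -> [23], so sumMaxPath() returns 23.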
# main (DRIVER)
def main():
if len(sys.argv) != 2:
print("Incorrect number of arguments.", file=sys.stderr)
print("Usage: " + sys.argv[0] + " <triangle_file>\n", file=sys.stderr)
return 1
else:
InputFileName = sys.argv[1]
try:
# read file
InputFile = open(InputFileName, "r")
except IOError:
print("The file \"" + InputFileName + "\" does not exist.\n")
return 2
print(MaximumPathSum(InputFile).sumMaxPath())
return 0
# call to main
if __name__ == "__main__":
main()
| mit | 4,385,577,479,959,194,600 | 20.127273 | 65 | 0.638554 | false | 2.677419 | false | false | false |
| open-craft/xblock-mentoring | mentoring/mentoring.py | 1 | 22696 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Harvard
#
# Authors:
# Xavier Antoviaque <xavier@antoviaque.org>
#
# This software's license gives you freedom; you can copy, convey,
# propagate, redistribute and/or modify this program under the terms of
# the GNU Affero General Public License (AGPL) as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version of the AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
#
# Imports ###########################################################
import json
import logging
import uuid
import re
from collections import namedtuple
from lxml import etree
from StringIO import StringIO
from xblock.core import XBlock
from xblock.fields import Boolean, Scope, String, Integer, Float, List
from xblock.fragment import Fragment
from .light_children import XBlockWithLightChildren
from .title import TitleBlock
from .header import SharedHeaderBlock
from .message import MentoringMessageBlock
from .step import StepParentMixin
from .utils import loader
# Globals ###########################################################
log = logging.getLogger(__name__)
def _default_xml_content():
return loader.render_template(
'templates/xml/mentoring_default.xml',
{'url_name': 'mentoring-{}'.format(uuid.uuid4())})
def _is_default_xml_content(value):
UUID_PATTERN = '[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}'
DUMMY_UUID = '12345678-1234-1234-1234-123456789abc'
if value is _default_xml_content:
return True
expected = _default_xml_content()
expected = re.sub(UUID_PATTERN, DUMMY_UUID, expected)
value = re.sub(UUID_PATTERN, DUMMY_UUID, value)
return value == expected
# Classes ###########################################################
Score = namedtuple("Score", ["raw", "percentage", "correct", "incorrect", "partially_correct"])
CORRECT = 'correct'
INCORRECT = 'incorrect'
PARTIAL = 'partial'
class MentoringBlock(XBlockWithLightChildren, StepParentMixin):
"""
An XBlock providing mentoring capabilities
Composed of text, answer input fields, and a set of MRQs/MCQs with advice.
A set of conditions on the provided answers and MCQ/MRQ choices determines whether the
student is a) given mentoring advice and asked to revise the answer, or b) given the
OK to continue.
"""
@staticmethod
def is_default_xml_content(value):
return _is_default_xml_content(value)
attempted = Boolean(help="Has the student attempted this mentoring step?",
default=False, scope=Scope.user_state)
completed = Boolean(help="Has the student completed this mentoring step?",
default=False, scope=Scope.user_state)
next_step = String(help="url_name of the next step the student must complete (global to all blocks)",
default='mentoring_first', scope=Scope.preferences)
followed_by = String(help="url_name of the step after the current mentoring block in workflow",
default=None, scope=Scope.content)
url_name = String(help="Name of the current step, used for URL building",
default='mentoring-default', scope=Scope.content)
enforce_dependency = Boolean(help="Should the next step be the current block to complete?",
default=False, scope=Scope.content, enforce_type=True)
display_submit = Boolean(help="Allow submission of the current block?", default=True,
scope=Scope.content, enforce_type=True)
xml_content = String(help="XML content", default=_default_xml_content, scope=Scope.content)
weight = Float(help="Defines the maximum total grade of the block.",
default=1, scope=Scope.content, enforce_type=True)
num_attempts = Integer(help="Number of attempts the user has made on this question",
default=0, scope=Scope.user_state, enforce_type=True)
max_attempts = Integer(help="Maximum number of attempts for this question", default=0,
scope=Scope.content, enforce_type=True)
mode = String(help="Mode of the mentoring. 'standard' or 'assessment'",
default='standard', scope=Scope.content)
step = Integer(help="Keep track of the student assessment progress.",
default=0, scope=Scope.user_state, enforce_type=True)
student_results = List(help="Store results of student choices.", default=[],
scope=Scope.user_state)
extended_feedback = Boolean(help="Show extended feedback details when all attempts are used up.",
default=False, scope=Scope.content)
display_name = String(help="Display name of the component", default="Mentoring XBlock",
scope=Scope.settings)
icon_class = 'problem'
has_score = True
MENTORING_MODES = ('standard', 'assessment')
FLOATING_BLOCKS = (TitleBlock, MentoringMessageBlock, SharedHeaderBlock)
FIELDS_TO_INIT = ('xml_content',)
@property
def is_assessment(self):
return self.mode == 'assessment'
def get_question_number(self, question_id):
"""
Get the step number of the question id
"""
for question in self.get_children_objects():
if hasattr(question, 'step_number') and (question.name == question_id):
return question.step_number
raise ValueError("Question ID in answer set not a step of this Mentoring Block!")
def answer_mapper(self, answer_status):
"""
Create a JSON-dumpable object with readable key names from a list of student answers.
"""
return [
{
'number': self.get_question_number(answer[0]),
'id': answer[0],
'details': answer[1],
} for answer in self.student_results if answer[1]['status'] == answer_status
]
@property
def score(self):
"""Compute the student score taking into account the light child weight."""
total_child_weight = sum(float(step.weight) for step in self.steps)
if total_child_weight == 0:
return Score(0, 0, [], [], [])
score = sum(r[1]['score'] * r[1]['weight'] for r in self.student_results) / total_child_weight
correct = self.answer_mapper(CORRECT)
incorrect = self.answer_mapper(INCORRECT)
partially_correct = self.answer_mapper(PARTIAL)
return Score(score, int(round(score * 100)), correct, incorrect, partially_correct)
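# Illustrative computation (hypothetical stored results, not part of the original
# file): with two steps of weight 1.0 and student_results entries scoring 1 and 0.5,
# the raw score is (1*1.0 + 0.5*1.0) / 2.0 = 0.75 and the reported percentage is 75.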
@property
def assessment_message(self):
if not self.max_attempts_reached:
return self.get_message_html('on-assessment-review')
else:
return None
def show_extended_feedback(self):
return self.extended_feedback and self.max_attempts_reached
def feedback_dispatch(self, target_data, stringify):
if self.show_extended_feedback():
if stringify:
return json.dumps(target_data)
else:
return target_data
def correct_json(self, stringify=True):
return self.feedback_dispatch(self.score.correct, stringify)
def incorrect_json(self, stringify=True):
return self.feedback_dispatch(self.score.incorrect, stringify)
def partial_json(self, stringify=True):
return self.feedback_dispatch(self.score.partially_correct, stringify)
def student_view(self, context):
# Migrate stored data if necessary
self.migrate_fields()
fragment, named_children = self.get_children_fragment(
context, view_name='mentoring_view',
not_instance_of=self.FLOATING_BLOCKS,
)
fragment.add_content(loader.render_template('templates/html/mentoring.html', {
'self': self,
'named_children': named_children,
'missing_dependency_url': self.has_missing_dependency and self.next_step_url,
}))
fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/mentoring.css'))
fragment.add_javascript_url(
self.runtime.local_resource_url(self, 'public/js/vendor/underscore-min.js'))
js_view = 'mentoring_assessment_view.js' if self.is_assessment else 'mentoring_standard_view.js'
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/'+js_view))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/mentoring.js'))
fragment.add_resource(loader.load_unicode('templates/html/mentoring_attempts.html'), "text/html")
fragment.add_resource(loader.load_unicode('templates/html/mentoring_grade.html'), "text/html")
fragment.add_resource(loader.load_unicode('templates/html/mentoring_review_questions.html'), "text/html")
fragment.initialize_js('MentoringBlock')
if not self.display_submit:
self.runtime.publish(self, 'progress', {})
return fragment
def migrate_fields(self):
"""
Migrate data stored in the fields, when a format change breaks backward-compatibility with
previous data formats
"""
# Partial answers replaced the `completed` with `status` in `self.student_results`
if self.student_results and 'completed' in self.student_results[0][1]:
# Rename the field and use the new value format (text instead of boolean)
for result in self.student_results:
result[1]['status'] = CORRECT if result[1]['completed'] else INCORRECT
del result[1]['completed']
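# e.g. a legacy entry ['answer_1', {'completed': True, 'score': 1}] becomes
# ['answer_1', {'status': 'correct', 'score': 1}] (hypothetical values, shown
# only to illustrate the migration above).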
@property
def additional_publish_event_data(self):
return {
'user_id': self.scope_ids.user_id,
'component_id': self.url_name,
}
@property
def title(self):
"""
Returns the title child.
"""
for child in self.get_children_objects():
if isinstance(child, TitleBlock):
return child
return None
@property
def header(self):
"""
Return the header child.
"""
for child in self.get_children_objects():
if isinstance(child, SharedHeaderBlock):
return child
return None
@property
def has_missing_dependency(self):
"""
Returns True if the student needs to complete another step before being able to complete
the current one, and False otherwise
"""
return self.enforce_dependency and (not self.completed) and (self.next_step != self.url_name)
@property
def next_step_url(self):
"""
Returns the URL of the next step's page
"""
return '/jump_to_id/{}'.format(self.next_step)
@XBlock.json_handler
def get_results(self, queries, suffix=''):
"""
Gets detailed results in the case of extended feedback.
It may be a good idea to eventually have this function get results
in the general case instead of loading them in the template in the future,
and only using it for extended feedback situations.
Right now there are two ways to get results-- through the template upon loading up
the mentoring block, or after submission of an AJAX request like in
submit or get_results here.
"""
results = []
if not self.show_extended_feedback():
return {
'results': [],
'error': 'Extended feedback results cannot be obtained.'
}
completed = True
choices = dict(self.student_results)
step = self.step
# Only one child should ever be of concern with this method.
for child in self.get_children_objects():
if child.name and child.name in queries:
results = [child.name, child.get_results(choices[child.name])]
# Children may have their own definition of 'completed' which can vary from the general case
# of the whole mentoring block being completed. This is because in standard mode, all children
# must be correct to complete the block. In assessment mode with extended feedback, completion
# happens when you're out of attempts, no matter how you did.
completed = choices[child.name]['status']
break
# The 'completed' message should always be shown in this case, since no more attempts are available.
message = self.get_message(True)
return {
'results': results,
'completed': completed,
'attempted': self.attempted,
'message': message,
'step': step,
'max_attempts': self.max_attempts,
'num_attempts': self.num_attempts,
}
def get_message(self, completed):
if self.max_attempts_reached:
return self.get_message_html('max_attempts_reached')
elif completed:
return self.get_message_html('completed')
else:
return self.get_message_html('incomplete')
@XBlock.json_handler
def submit(self, submissions, suffix=''):
log.info(u'Received submissions: {}'.format(submissions))
self.attempted = True
if self.is_assessment:
return self.handleAssessmentSubmit(submissions, suffix)
submit_results = []
completed = True
for child in self.get_children_objects():
if child.name and child.name in submissions:
submission = submissions[child.name]
child_result = child.submit(submission)
submit_results.append([child.name, child_result])
child.save()
completed = completed and (child_result['status'] == CORRECT)
message = self.get_message(completed)
# Once it has been completed once, keep completion even if user changes values
if self.completed:
completed = True
# server-side check to not set completion if the max_attempts is reached
if self.max_attempts_reached:
completed = False
if self.has_missing_dependency:
completed = False
message = 'You need to complete all previous steps before being able to complete the current one.'
elif completed and self.next_step == self.url_name:
self.next_step = self.followed_by
# Once it was completed, lock score
if not self.completed:
# save user score and results
while self.student_results:
self.student_results.pop()
for result in submit_results:
self.student_results.append(result)
self.runtime.publish(self, 'grade', {
'value': self.score.raw,
'max_value': 1,
})
if not self.completed and self.max_attempts > 0:
self.num_attempts += 1
self.completed = completed is True
raw_score = self.score.raw
self.publish_event_from_dict('xblock.mentoring.submitted', {
'num_attempts': self.num_attempts,
'submitted_answer': submissions,
'grade': raw_score,
})
return {
'results': submit_results,
'completed': self.completed,
'attempted': self.attempted,
'message': message,
'max_attempts': self.max_attempts,
'num_attempts': self.num_attempts
}
def handleAssessmentSubmit(self, submissions, suffix):
completed = False
current_child = None
children = [child for child in self.get_children_objects()
if not isinstance(child, self.FLOATING_BLOCKS)]
assessment_message = None
for child in children:
if child.name and child.name in submissions:
submission = submissions[child.name]
# Assessment mode doesn't allow modifying answers.
# This will put the student back at the step he should be on.
current_child = child
step = children.index(child)
if self.step > step or self.max_attempts_reached:
step = self.step
completed = False
break
self.step = step + 1
child_result = child.submit(submission)
if 'tips' in child_result:
del child_result['tips']
self.student_results.append([child.name, child_result])
child.save()
completed = child_result['status']
event_data = {}
score = self.score
if current_child == self.steps[-1]:
log.info(u'Last assessment step submitted: {}'.format(submissions))
if not self.max_attempts_reached:
self.runtime.publish(self, 'grade', {
'value': score.raw,
'max_value': 1,
'score_type': 'proficiency',
})
event_data['final_grade'] = score.raw
assessment_message = self.assessment_message
self.num_attempts += 1
self.completed = True
event_data['exercise_id'] = current_child.name
event_data['num_attempts'] = self.num_attempts
event_data['submitted_answer'] = submissions
self.publish_event_from_dict('xblock.mentoring.assessment.submitted', event_data)
return {
'completed': completed,
'attempted': self.attempted,
'max_attempts': self.max_attempts,
'num_attempts': self.num_attempts,
'step': self.step,
'score': score.percentage,
'correct_answer': len(score.correct),
'incorrect_answer': len(score.incorrect),
'partially_correct_answer': len(score.partially_correct),
'extended_feedback': self.show_extended_feedback() or '',
'correct': self.correct_json(stringify=False),
'incorrect': self.incorrect_json(stringify=False),
'partial': self.partial_json(stringify=False),
'assessment_message': assessment_message,
}
@XBlock.json_handler
def try_again(self, data, suffix=''):
if self.max_attempts_reached:
return {
'result': 'error',
'message': 'max attempts reached'
}
# reset
self.step = 0
self.completed = False
while self.student_results:
self.student_results.pop()
return {
'result': 'success'
}
@property
def max_attempts_reached(self):
return self.max_attempts > 0 and self.num_attempts >= self.max_attempts
def get_message_fragment(self, message_type):
for child in self.get_children_objects():
if isinstance(child, MentoringMessageBlock) and child.type == message_type:
frag = self.render_child(child, 'mentoring_view', {})
return self.fragment_text_rewriting(frag)
def get_message_html(self, message_type):
fragment = self.get_message_fragment(message_type)
if fragment:
return fragment.body_html()
else:
return ''
def studio_view(self, context):
"""
Editing view in Studio
"""
fragment = Fragment()
fragment.add_content(loader.render_template('templates/html/mentoring_edit.html', {
'self': self,
'xml_content': self.xml_content,
}))
fragment.add_javascript_url(
self.runtime.local_resource_url(self, 'public/js/mentoring_edit.js'))
fragment.add_css_url(
self.runtime.local_resource_url(self, 'public/css/mentoring_edit.css'))
fragment.initialize_js('MentoringEditBlock')
return fragment
@XBlock.json_handler
def studio_submit(self, submissions, suffix=''):
log.info(u'Received studio submissions: {}'.format(submissions))
xml_content = submissions['xml_content']
try:
content = etree.parse(StringIO(xml_content))
except etree.XMLSyntaxError as e:
response = {
'result': 'error',
'message': e.message
}
else:
success = True
root = content.getroot()
if 'mode' in root.attrib:
if root.attrib['mode'] not in self.MENTORING_MODES:
response = {
'result': 'error',
'message': "Invalid mentoring mode: should be 'standard' or 'assessment'"
}
success = False
elif root.attrib['mode'] == 'assessment' and 'max_attempts' not in root.attrib:
# assessment has a default of 2 max_attempts
root.attrib['max_attempts'] = '2'
if success:
response = {
'result': 'success',
}
self.xml_content = etree.tostring(content, pretty_print=True)
log.debug(u'Response from Studio: {}'.format(response))
return response
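# Illustrative submission (hypothetical XML; the root tag name is an assumption):
#   <mentoring url_name="mentoring-xyz" mode="assessment"> ... </mentoring>
# is accepted and gains max_attempts="2" automatically, while an unknown mode
# value is rejected with the error response built above.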
@property
def url_name_with_default(self):
"""
Ensure the `url_name` is set to a unique, non-empty value.
This should ideally be handled by Studio, but we need to declare the attribute
to be able to use it from the workbench, and when this happens Studio doesn't set
a unique default value - this property gives either the set value, or if none is set
a randomized default value
"""
if self.url_name == 'mentoring-default':
return 'mentoring-{}'.format(uuid.uuid4())
else:
return self.url_name
@staticmethod
def workbench_scenarios():
"""
Scenarios displayed by the workbench. Load them from external (private) repository
"""
return loader.load_scenarios_from_path('templates/xml')
| agpl-3.0 | 2,307,463,548,119,067,600 | 37.337838 | 113 | 0.60341 | false | 4.379776 | false | false | false |
| mpunkenhofer/irc-telegram-bot | telepot/telepot/__init__.py | 1 | 39572 |
import sys
import io
import time
import json
import threading
import traceback
import collections
import bisect
try:
import Queue as queue
except ImportError:
import queue
# Patch urllib3 for sending unicode filename
from . import hack
from . import exception
__version_info__ = (10, 5)
__version__ = '.'.join(map(str, __version_info__))
def flavor(msg):
"""
Return flavor of message or event.
A message's flavor may be one of these:
- ``chat``
- ``callback_query``
- ``inline_query``
- ``chosen_inline_result``
An event's flavor is determined by the single top-level key.
"""
if 'message_id' in msg:
return 'chat'
elif 'id' in msg and 'chat_instance' in msg:
return 'callback_query'
elif 'id' in msg and 'query' in msg:
return 'inline_query'
elif 'result_id' in msg:
return 'chosen_inline_result'
else:
top_keys = list(msg.keys())
if len(top_keys) == 1:
return top_keys[0]
raise exception.BadFlavor(msg)
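# Illustrative results (minimal, hypothetical payloads, not part of the original file):
#   flavor({'message_id': 1, 'chat': {'id': 5, 'type': 'private'}})  -> 'chat'
#   flavor({'id': '42', 'chat_instance': 'ci', 'data': 'x'})         -> 'callback_query'
#   flavor({'id': '42', 'query': 'kittens'})                         -> 'inline_query'
#   flavor({'result_id': '7', 'query': 'kittens'})                   -> 'chosen_inline_result'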
chat_flavors = ['chat']
inline_flavors = ['inline_query', 'chosen_inline_result']
def _find_first_key(d, keys):
for k in keys:
if k in d:
return k
raise KeyError('No suggested keys %s in %s' % (str(keys), str(d)))
all_content_types = [
'text', 'audio', 'document', 'game', 'photo', 'sticker', 'video', 'voice',
'contact', 'location', 'venue', 'new_chat_member', 'left_chat_member', 'new_chat_title',
'new_chat_photo', 'delete_chat_photo', 'group_chat_created', 'supergroup_chat_created',
'channel_chat_created', 'migrate_to_chat_id', 'migrate_from_chat_id', 'pinned_message',
]
def glance(msg, flavor='chat', long=False):
"""
Extract "headline" info about a message.
Use parameter ``long`` to control whether a short or long tuple is returned.
When ``flavor`` is ``chat``
(``msg`` being a `Message <https://core.telegram.org/bots/api#message>`_ object):
- short: (content_type, ``msg['chat']['type']``, ``msg['chat']['id']``)
- long: (content_type, ``msg['chat']['type']``, ``msg['chat']['id']``, ``msg['date']``, ``msg['message_id']``)
*content_type* can be: ``text``, ``audio``, ``document``, ``game``, ``photo``, ``sticker``, ``video``, ``voice``,
``contact``, ``location``, ``venue``, ``new_chat_member``, ``left_chat_member``, ``new_chat_title``,
``new_chat_photo``, ``delete_chat_photo``, ``group_chat_created``, ``supergroup_chat_created``,
``channel_chat_created``, ``migrate_to_chat_id``, ``migrate_from_chat_id``, ``pinned_message``.
When ``flavor`` is ``callback_query``
(``msg`` being a `CallbackQuery <https://core.telegram.org/bots/api#callbackquery>`_ object):
- regardless: (``msg['id']``, ``msg['from']['id']``, ``msg['data']``)
When ``flavor`` is ``inline_query``
(``msg`` being a `InlineQuery <https://core.telegram.org/bots/api#inlinequery>`_ object):
- short: (``msg['id']``, ``msg['from']['id']``, ``msg['query']``)
- long: (``msg['id']``, ``msg['from']['id']``, ``msg['query']``, ``msg['offset']``)
When ``flavor`` is ``chosen_inline_result``
(``msg`` being a `ChosenInlineResult <https://core.telegram.org/bots/api#choseninlineresult>`_ object):
- regardless: (``msg['result_id']``, ``msg['from']['id']``, ``msg['query']``)
"""
def gl_chat():
content_type = _find_first_key(msg, all_content_types)
if long:
return content_type, msg['chat']['type'], msg['chat']['id'], msg['date'], msg['message_id']
else:
return content_type, msg['chat']['type'], msg['chat']['id']
def gl_callback_query():
return msg['id'], msg['from']['id'], msg['data']
def gl_inline_query():
if long:
return msg['id'], msg['from']['id'], msg['query'], msg['offset']
else:
return msg['id'], msg['from']['id'], msg['query']
def gl_chosen_inline_result():
return msg['result_id'], msg['from']['id'], msg['query']
try:
fn = {'chat': gl_chat,
'callback_query': gl_callback_query,
'inline_query': gl_inline_query,
'chosen_inline_result': gl_chosen_inline_result}[flavor]
except KeyError:
raise exception.BadFlavor(flavor)
return fn()
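# Illustrative use inside a chat handler (hypothetical message and bot object):
#   content_type, chat_type, chat_id = glance(msg)
#   if content_type == 'text':
#       bot.sendMessage(chat_id, 'You said: ' + msg['text'])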
def flance(msg, long=False):
"""
A combination of :meth:`telepot.flavor` and :meth:`telepot.glance`,
return a 2-tuple (flavor, headline_info), where *headline_info* is whatever extracted by
:meth:`telepot.glance` depending on the message flavor and the ``long`` parameter.
"""
f = flavor(msg)
g = glance(msg, flavor=f, long=long)
return f,g
def peel(event):
"""
Remove an event's top-level skin (where its flavor is determined), and return
the core content.
"""
return list(event.values())[0]
def fleece(event):
"""
A combination of :meth:`telepot.flavor` and :meth:`telepot.peel`,
return a 2-tuple (flavor, content) of an event.
"""
return flavor(event), peel(event)
def is_event(msg):
"""
Return whether the message looks like an event. That is, whether it has a flavor
that starts with an underscore.
"""
return flavor(msg).startswith('_')
def origin_identifier(msg):
"""
Extract the message identifier of a callback query's origin. Returned value
is guaranteed to be a tuple.
``msg`` is expected to be ``callback_query``.
"""
if 'message' in msg:
return msg['message']['chat']['id'], msg['message']['message_id']
elif 'inline_message_id' in msg:
return msg['inline_message_id'],
else:
raise ValueError()
def message_identifier(msg):
"""
Extract an identifier for message editing. Useful with :meth:`telepot.Bot.editMessageText`
and similar methods. Returned value is guaranteed to be a tuple.
``msg`` is expected to be ``chat`` or ``chosen_inline_result``.
"""
if 'chat' in msg and 'message_id' in msg:
return msg['chat']['id'], msg['message_id']
elif 'inline_message_id' in msg:
return msg['inline_message_id'],
else:
raise ValueError()
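# Illustrative round trip (hypothetical chat_id): edit a message just sent.
#   sent = bot.sendMessage(chat_id, 'hello')
#   bot.editMessageText(message_identifier(sent), 'hello, edited')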
def _dismantle_message_identifier(f):
if isinstance(f, tuple):
if len(f) == 2:
return {'chat_id': f[0], 'message_id': f[1]}
elif len(f) == 1:
return {'inline_message_id': f[0]}
else:
raise ValueError()
else:
return {'inline_message_id': f}
PY_3 = sys.version_info.major >= 3
_string_type = str if PY_3 else basestring
_file_type = io.IOBase if PY_3 else file
def _isstring(s):
return isinstance(s, _string_type)
def _isfile(f):
return isinstance(f, _file_type)
from . import helper
def flavor_router(routing_table):
router = helper.Router(flavor, routing_table)
return router.route
class _BotBase(object):
def __init__(self, token):
self._token = token
self._file_chunk_size = 65536
def _strip(params, more=[]):
return {key: value for key,value in params.items() if key not in ['self']+more}
def _rectify(params):
def namedtuple_to_dict(value):
if isinstance(value, list):
return [namedtuple_to_dict(v) for v in value]
elif isinstance(value, dict):
return {k:namedtuple_to_dict(v) for k,v in value.items() if v is not None}
elif isinstance(value, tuple) and hasattr(value, '_asdict'):
return {k:namedtuple_to_dict(v) for k,v in value._asdict().items() if v is not None}
else:
return value
def flatten(value):
v = namedtuple_to_dict(value)
if isinstance(v, (dict, list)):
return json.dumps(v, separators=(',',':'))
else:
return v
# remove None, then json-serialize if needed
return {k: flatten(v) for k,v in params.items() if v is not None}
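# Illustrative effect (hypothetical params): None values are dropped and
# container values are JSON-serialized with compact separators, e.g.
#   _rectify({'chat_id': 5, 'reply_markup': {'keyboard': [['a']]}, 'caption': None})
#   -> {'chat_id': 5, 'reply_markup': '{"keyboard":[["a"]]}'}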
from . import api
class Bot(_BotBase):
class Scheduler(threading.Thread):
# A class that is sorted by timestamp. Use `bisect` module to ensure order in event queue.
Event = collections.namedtuple('Event', ['timestamp', 'data'])
Event.__eq__ = lambda self, other: self.timestamp == other.timestamp
Event.__ne__ = lambda self, other: self.timestamp != other.timestamp
Event.__gt__ = lambda self, other: self.timestamp > other.timestamp
Event.__ge__ = lambda self, other: self.timestamp >= other.timestamp
Event.__lt__ = lambda self, other: self.timestamp < other.timestamp
Event.__le__ = lambda self, other: self.timestamp <= other.timestamp
def __init__(self):
super(Bot.Scheduler, self).__init__()
self._eventq = []
self._lock = threading.RLock() # reentrant lock to allow locked method calling locked method
self._output_queue = None
def _locked(fn):
def k(self, *args, **kwargs):
with self._lock:
return fn(self, *args, **kwargs)
return k
@_locked
def _insert_event(self, data, when):
ev = self.Event(when, data)
bisect.insort(self._eventq, ev)
return ev
@_locked
def _remove_event(self, event):
# Find event according to its timestamp.
# Index returned should be one behind.
i = bisect.bisect(self._eventq, event)
# Having two events with identical timestamp is unlikely but possible.
# I am going to move forward and compare timestamp AND object address
# to make sure the correct object is found.
while i > 0:
i -= 1
e = self._eventq[i]
if e.timestamp != event.timestamp:
raise exception.EventNotFound(event)
elif id(e) == id(event):
self._eventq.pop(i)
return
raise exception.EventNotFound(event)
@_locked
def _pop_expired_event(self):
if not self._eventq:
return None
if self._eventq[0].timestamp <= time.time():
return self._eventq.pop(0)
else:
return None
def event_at(self, when, data):
"""
Schedule some data to emit at an absolute timestamp.
:type when: int or float
:type data: dictionary
:return: an internal Event object
"""
return self._insert_event(data, when)
def event_later(self, delay, data):
"""
Schedule some data to emit after a number of seconds.
:type delay: int or float
:type data: dictionary
:return: an internal Event object
"""
return self._insert_event(data, time.time()+delay)
def event_now(self, data):
"""
Emit some data as soon as possible.
:type data: dictionary
:return: an internal Event object
"""
return self._insert_event(data, time.time())
def cancel(self, event):
"""
Cancel an event.
:type event: an internal Event object
"""
self._remove_event(event)
def run(self):
while 1:
e = self._pop_expired_event()
while e:
if callable(e.data):
d = e.data()
if d is not None:
self._output_queue.put(d)
else:
self._output_queue.put(e.data)
e = self._pop_expired_event()
time.sleep(0.1)
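# Illustrative use (hypothetical payload): schedule an event for the bot's
# message loop 30 seconds from now, then cancel it before it fires.
#   ev = bot.scheduler.event_later(30, {'_remind': {'chat_id': 123}})
#   bot.scheduler.cancel(ev)
# Such an event's flavor is '_remind' (its single top-level key), so it can be
# routed like any other message.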
def __init__(self, token):
super(Bot, self).__init__(token)
self._scheduler = self.Scheduler()
self._router = helper.Router(flavor, {'chat': lambda msg: self.on_chat_message(msg),
'callback_query': lambda msg: self.on_callback_query(msg),
'inline_query': lambda msg: self.on_inline_query(msg),
'chosen_inline_result': lambda msg: self.on_chosen_inline_result(msg)})
# use lambda to delay evaluation of self.on_ZZZ to runtime because
# I don't want to require defining all methods right here.
@property
def scheduler(self):
return self._scheduler
@property
def router(self):
return self._router
def handle(self, msg):
self._router.route(msg)
def _api_request(self, method, params=None, files=None, **kwargs):
return api.request((self._token, method, params, files), **kwargs)
def getMe(self):
""" See: https://core.telegram.org/bots/api#getme """
return self._api_request('getMe')
def sendMessage(self, chat_id, text,
parse_mode=None, disable_web_page_preview=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendmessage """
p = _strip(locals())
return self._api_request('sendMessage', _rectify(p))
def forwardMessage(self, chat_id, from_chat_id, message_id, disable_notification=None):
""" See: https://core.telegram.org/bots/api#forwardmessage """
p = _strip(locals())
return self._api_request('forwardMessage', _rectify(p))
def _sendfile(self, inputfile, filetype, params):
method = {'photo': 'sendPhoto',
'audio': 'sendAudio',
'document': 'sendDocument',
'sticker': 'sendSticker',
'video': 'sendVideo',
'voice': 'sendVoice',}[filetype]
if _isstring(inputfile):
params[filetype] = inputfile
return self._api_request(method, _rectify(params))
else:
files = {filetype: inputfile}
return self._api_request(method, _rectify(params), files)
def sendPhoto(self, chat_id, photo,
caption=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendphoto
:param photo:
a string indicating a ``file_id`` on server,
a file-like object as obtained by ``open()`` or ``urlopen()``,
or a (filename, file-like object) tuple.
If the file-like object is obtained by ``urlopen()``, you most likely
have to supply a filename because Telegram servers require to know
the file extension.
If the filename contains non-ASCII characters and you are using Python 2.7,
make sure the filename is a unicode string.
"""
p = _strip(locals(), more=['photo'])
return self._sendfile(photo, 'photo', p)
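# Illustrative calls (hypothetical chat_id, file_id, and paths):
#   bot.sendPhoto(chat_id, 'AgADBAAD...')                    # resend by file_id
#   bot.sendPhoto(chat_id, open('cat.jpg', 'rb'))            # upload a local file
#   bot.sendPhoto(chat_id, ('cat.jpg', urlopen(photo_url)))  # (filename, file-like)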
def sendAudio(self, chat_id, audio,
caption=None, duration=None, performer=None, title=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendaudio
:param audio: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['audio'])
return self._sendfile(audio, 'audio', p)
def sendDocument(self, chat_id, document,
caption=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#senddocument
:param document: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['document'])
return self._sendfile(document, 'document', p)
def sendSticker(self, chat_id, sticker,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendsticker
:param sticker: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['sticker'])
return self._sendfile(sticker, 'sticker', p)
def sendVideo(self, chat_id, video,
duration=None, width=None, height=None, caption=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendvideo
:param video: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['video'])
return self._sendfile(video, 'video', p)
def sendVoice(self, chat_id, voice,
caption=None, duration=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendvoice
:param voice: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['voice'])
return self._sendfile(voice, 'voice', p)
def sendLocation(self, chat_id, latitude, longitude,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendlocation """
p = _strip(locals())
return self._api_request('sendLocation', _rectify(p))
def sendVenue(self, chat_id, latitude, longitude, title, address,
foursquare_id=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendvenue """
p = _strip(locals())
return self._api_request('sendVenue', _rectify(p))
def sendContact(self, chat_id, phone_number, first_name,
last_name=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendcontact """
p = _strip(locals())
return self._api_request('sendContact', _rectify(p))
def sendGame(self, chat_id, game_short_name,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendgame """
p = _strip(locals())
return self._api_request('sendGame', _rectify(p))
def sendChatAction(self, chat_id, action):
""" See: https://core.telegram.org/bots/api#sendchataction """
p = _strip(locals())
return self._api_request('sendChatAction', _rectify(p))
def getUserProfilePhotos(self, user_id, offset=None, limit=None):
""" See: https://core.telegram.org/bots/api#getuserprofilephotos """
p = _strip(locals())
return self._api_request('getUserProfilePhotos', _rectify(p))
def getFile(self, file_id):
""" See: https://core.telegram.org/bots/api#getfile """
p = _strip(locals())
return self._api_request('getFile', _rectify(p))
def kickChatMember(self, chat_id, user_id):
""" See: https://core.telegram.org/bots/api#kickchatmember """
p = _strip(locals())
return self._api_request('kickChatMember', _rectify(p))
def leaveChat(self, chat_id):
""" See: https://core.telegram.org/bots/api#leavechat """
p = _strip(locals())
return self._api_request('leaveChat', _rectify(p))
def unbanChatMember(self, chat_id, user_id):
""" See: https://core.telegram.org/bots/api#unbanchatmember """
p = _strip(locals())
return self._api_request('unbanChatMember', _rectify(p))
def getChat(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchat """
p = _strip(locals())
return self._api_request('getChat', _rectify(p))
def getChatAdministrators(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchatadministrators """
p = _strip(locals())
return self._api_request('getChatAdministrators', _rectify(p))
def getChatMembersCount(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchatmemberscount """
p = _strip(locals())
return self._api_request('getChatMembersCount', _rectify(p))
def getChatMember(self, chat_id, user_id):
""" See: https://core.telegram.org/bots/api#getchatmember """
p = _strip(locals())
return self._api_request('getChatMember', _rectify(p))
def answerCallbackQuery(self, callback_query_id,
text=None, show_alert=None, url=None, cache_time=None):
""" See: https://core.telegram.org/bots/api#answercallbackquery """
p = _strip(locals())
return self._api_request('answerCallbackQuery', _rectify(p))
def editMessageText(self, msg_identifier, text,
parse_mode=None, disable_web_page_preview=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagetext
:param msg_identifier:
a 2-tuple (``chat_id``, ``message_id``),
a 1-tuple (``inline_message_id``),
or simply ``inline_message_id``.
You may extract this value easily with :meth:`telepot.message_identifier`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('editMessageText', _rectify(p))
def editMessageCaption(self, msg_identifier, caption=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagecaption
:param msg_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('editMessageCaption', _rectify(p))
def editMessageReplyMarkup(self, msg_identifier, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagereplymarkup
:param msg_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('editMessageReplyMarkup', _rectify(p))
def answerInlineQuery(self, inline_query_id, results,
cache_time=None, is_personal=None, next_offset=None,
switch_pm_text=None, switch_pm_parameter=None):
""" See: https://core.telegram.org/bots/api#answerinlinequery """
p = _strip(locals())
return self._api_request('answerInlineQuery', _rectify(p))
def getUpdates(self, offset=None, limit=None, timeout=None, allowed_updates=None):
""" See: https://core.telegram.org/bots/api#getupdates """
p = _strip(locals())
return self._api_request('getUpdates', _rectify(p))
def setWebhook(self, url=None, certificate=None, max_connections=None, allowed_updates=None):
""" See: https://core.telegram.org/bots/api#setwebhook """
p = _strip(locals(), more=['certificate'])
if certificate:
files = {'certificate': certificate}
return self._api_request('setWebhook', _rectify(p), files)
else:
return self._api_request('setWebhook', _rectify(p))
def deleteWebhook(self):
""" See: https://core.telegram.org/bots/api#deletewebhook """
return self._api_request('deleteWebhook')
def getWebhookInfo(self):
""" See: https://core.telegram.org/bots/api#getwebhookinfo """
return self._api_request('getWebhookInfo')
def setGameScore(self, user_id, score, game_message_identifier,
force=None, disable_edit_message=None):
"""
See: https://core.telegram.org/bots/api#setgamescore
:param game_message_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
"""
p = _strip(locals(), more=['game_message_identifier'])
p.update(_dismantle_message_identifier(game_message_identifier))
return self._api_request('setGameScore', _rectify(p))
def getGameHighScores(self, user_id, game_message_identifier):
"""
See: https://core.telegram.org/bots/api#getgamehighscores
:param game_message_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
"""
p = _strip(locals(), more=['game_message_identifier'])
p.update(_dismantle_message_identifier(game_message_identifier))
return self._api_request('getGameHighScores', _rectify(p))
def download_file(self, file_id, dest):
"""
Download a file to local disk.
:param dest: a path or a ``file`` object
"""
f = self.getFile(file_id)
try:
d = dest if _isfile(dest) else open(dest, 'wb')
r = api.download((self._token, f['file_path']), preload_content=False)
while 1:
data = r.read(self._file_chunk_size)
if not data:
break
d.write(data)
finally:
if not _isfile(dest) and 'd' in locals():
d.close()
if 'r' in locals():
r.release_conn()
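# Illustrative use (hypothetical incoming photo message): save the largest
# photo size to disk.
#   content_type, chat_type, chat_id = glance(msg)
#   if content_type == 'photo':
#       bot.download_file(msg['photo'][-1]['file_id'], './incoming.jpg')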
def message_loop(self, callback=None, relax=0.1,
timeout=20, allowed_updates=None,
source=None, ordered=True, maxhold=3,
run_forever=False):
"""
Spawn a thread to constantly ``getUpdates`` or pull updates from a queue.
Apply ``callback`` to every message received. Also starts the scheduler thread
for internal events.
:param callback:
a function that takes one argument (the message), or a routing table.
If ``None``, the bot's ``handle`` method is used.
A *routing table* is a dictionary of ``{flavor: function}``, mapping messages to appropriate
handler functions according to their flavors. It allows you to define functions specifically
to handle one flavor of messages. It usually looks like this: ``{'chat': fn1,
'callback_query': fn2, 'inline_query': fn3, ...}``. Each handler function should take
one argument (the message).
:param source:
Source of updates.
If ``None``, ``getUpdates`` is used to obtain new messages from Telegram servers.
If it is a synchronized queue (``Queue.Queue`` in Python 2.7 or
``queue.Queue`` in Python 3), new messages are pulled from the queue.
A web application implementing a webhook can dump updates into the queue,
while the bot pulls from it. This is how telepot can be integrated with webhooks.
Acceptable contents in queue:
- ``str``, ``unicode`` (Python 2.7), or ``bytes`` (Python 3, decoded using UTF-8)
representing a JSON-serialized `Update <https://core.telegram.org/bots/api#update>`_ object.
- a ``dict`` representing an Update object.
When ``source`` is ``None``, these parameters are meaningful:
:type relax: float
:param relax: seconds between each ``getUpdates``
:type timeout: int
:param timeout:
``timeout`` parameter supplied to :meth:`telepot.Bot.getUpdates`,
controlling how long to poll.
:type allowed_updates: array of string
:param allowed_updates:
``allowed_updates`` parameter supplied to :meth:`telepot.Bot.getUpdates`,
controlling which types of updates to receive.
When ``source`` is a queue, these parameters are meaningful:
:type ordered: bool
:param ordered:
If ``True``, ensure in-order delivery of messages to ``callback``
(i.e. updates with a smaller ``update_id`` always come before those with
a larger ``update_id``).
If ``False``, no re-ordering is done. ``callback`` is applied to messages
as soon as they are pulled from queue.
:type maxhold: float
:param maxhold:
Applied only when ``ordered`` is ``True``. The maximum number of seconds
an update is held waiting for a not-yet-arrived smaller ``update_id``.
When this number of seconds is up, the update is delivered to ``callback``
even if some smaller ``update_id``\s have not yet arrived. If those smaller
``update_id``\s arrive at some later time, they are discarded.
Finally, there is this parameter, meaningful always:
:type run_forever: bool or str
:param run_forever:
If ``True`` or any non-empty string, append an infinite loop at the end of
this method, so it never returns. Useful as the very last line in a program.
A non-empty string will also be printed, useful as an indication that the
program is listening.
"""
if callback is None:
callback = self.handle
elif isinstance(callback, dict):
callback = flavor_router(callback)
collect_queue = queue.Queue()
def collector():
while 1:
try:
item = collect_queue.get(block=True)
callback(item)
except:
# Localize error so thread can keep going.
traceback.print_exc()
def relay_to_collector(update):
key = _find_first_key(update, ['message',
'edited_message',
'channel_post',
'edited_channel_post',
'callback_query',
'inline_query',
'chosen_inline_result'])
collect_queue.put(update[key])
return update['update_id']
def get_from_telegram_server():
offset = None # running offset
allowed_upd = allowed_updates
while 1:
try:
result = self.getUpdates(offset=offset,
timeout=timeout,
allowed_updates=allowed_upd)
# Once passed, this parameter is no longer needed.
allowed_upd = None
if len(result) > 0:
# No sort. Trust server to give messages in correct order.
# Update offset to max(update_id) + 1
offset = max([relay_to_collector(update) for update in result]) + 1
except exception.BadHTTPResponse as e:
traceback.print_exc()
# Servers probably down. Wait longer.
if e.status == 502:
time.sleep(30)
except:
traceback.print_exc()
finally:
time.sleep(relax)
def dictify3(data):
if type(data) is bytes:
return json.loads(data.decode('utf-8'))
elif type(data) is str:
return json.loads(data)
elif type(data) is dict:
return data
else:
raise ValueError()
def dictify27(data):
if type(data) in [str, unicode]:
return json.loads(data)
elif type(data) is dict:
return data
else:
raise ValueError()
def get_from_queue_unordered(qu):
dictify = dictify3 if sys.version_info >= (3,) else dictify27
while 1:
try:
data = qu.get(block=True)
update = dictify(data)
relay_to_collector(update)
except:
traceback.print_exc()
def get_from_queue(qu):
dictify = dictify3 if sys.version_info >= (3,) else dictify27
# Here is the re-ordering mechanism, ensuring in-order delivery of updates.
max_id = None # max update_id passed to callback
buffer = collections.deque() # keep those updates which skip some update_id
qwait = None # how long to wait for updates,
# because buffer's content has to be returned in time.
while 1:
try:
data = qu.get(block=True, timeout=qwait)
update = dictify(data)
if max_id is None:
# First message received, handle regardless.
max_id = relay_to_collector(update)
elif update['update_id'] == max_id + 1:
# No update_id skipped, handle naturally.
max_id = relay_to_collector(update)
# clear contiguous updates in buffer
if len(buffer) > 0:
buffer.popleft() # first element belongs to update just received, useless now.
while 1:
try:
if type(buffer[0]) is dict:
max_id = relay_to_collector(buffer.popleft()) # updates that arrived earlier, handle them.
else:
break # gap, no more contiguous updates
except IndexError:
break # buffer empty
elif update['update_id'] > max_id + 1:
# Update arrives prematurely, insert it into the buffer.
nbuf = len(buffer)
if update['update_id'] <= max_id + nbuf:
# buffer long enough, put update at position
buffer[update['update_id'] - max_id - 1] = update
else:
# buffer too short, lengthen it
expire = time.time() + maxhold
for a in range(nbuf, update['update_id']-max_id-1):
buffer.append(expire) # put expiry time in gaps
buffer.append(update)
else:
pass # discard
except queue.Empty:
# debug message
# print('Timeout')
# some buffer contents have to be handled
# flush buffer until a non-expired time is encountered
while 1:
try:
if type(buffer[0]) is dict:
max_id = relay_to_collector(buffer.popleft())
else:
expire = buffer[0]
if expire <= time.time():
max_id += 1
buffer.popleft()
else:
break # non-expired
except IndexError:
break # buffer empty
except:
traceback.print_exc()
finally:
try:
# don't wait longer than next expiry time
qwait = buffer[0] - time.time()
if qwait < 0:
qwait = 0
except IndexError:
# buffer empty, can wait forever
qwait = None
# debug message
# print ('Buffer:', str(buffer), ', To Wait:', qwait, ', Max ID:', max_id)
collector_thread = threading.Thread(target=collector)
collector_thread.daemon = True
collector_thread.start()
if source is None:
message_thread = threading.Thread(target=get_from_telegram_server)
elif isinstance(source, queue.Queue):
if ordered:
message_thread = threading.Thread(target=get_from_queue, args=(source,))
else:
message_thread = threading.Thread(target=get_from_queue_unordered, args=(source,))
else:
raise ValueError('Invalid source')
message_thread.daemon = True # need this for main thread to be killable by Ctrl-C
message_thread.start()
self._scheduler._output_queue = collect_queue
self._scheduler.daemon = True
self._scheduler.start()
if run_forever:
if _isstring(run_forever):
print(run_forever)
while 1:
time.sleep(10)
import inspect
class SpeakerBot(Bot):
def __init__(self, token):
super(SpeakerBot, self).__init__(token)
self._mic = helper.Microphone()
@property
def mic(self):
return self._mic
def create_listener(self):
q = queue.Queue()
self._mic.add(q)
ln = helper.Listener(self._mic, q)
return ln
class DelegatorBot(SpeakerBot):
def __init__(self, token, delegation_patterns):
"""
:param delegation_patterns: a list of (seeder, delegator) tuples.
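Example -- a minimal sketch (the names below are illustrative placeholders):
def handle_one(seed_tuple): pass  # receives the (bot, msg, seed) tuple; do the real work here
def seed_by_chat(msg): return msg['chat']['id']
def make_worker(seed_tuple): return (handle_one, (seed_tuple,), {})
bot = DelegatorBot(TOKEN, [(seed_by_chat, make_worker)])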
"""
super(DelegatorBot, self).__init__(token)
self._delegate_records = [p+({},) for p in delegation_patterns]
def _startable(self, delegate):
return ((hasattr(delegate, 'start') and inspect.ismethod(delegate.start)) and
(hasattr(delegate, 'is_alive') and inspect.ismethod(delegate.is_alive)))
def _tuple_is_valid(self, t):
return len(t) == 3 and callable(t[0]) and type(t[1]) in [list, tuple] and type(t[2]) is dict
def _ensure_startable(self, delegate):
if self._startable(delegate):
return delegate
elif callable(delegate):
return threading.Thread(target=delegate)
elif type(delegate) is tuple and self._tuple_is_valid(delegate):
func, args, kwargs = delegate
return threading.Thread(target=func, args=args, kwargs=kwargs)
else:
raise RuntimeError('Delegate does not have the required methods, is not callable, and is not a valid tuple.')
def handle(self, msg):
self._mic.send(msg)
for calculate_seed, make_delegate, dict in self._delegate_records:
id = calculate_seed(msg)
if id is None:
continue
elif isinstance(id, collections.Hashable):
if id not in dict or not dict[id].is_alive():
d = make_delegate((self, msg, id))
d = self._ensure_startable(d)
dict[id] = d
dict[id].start()
else:
d = make_delegate((self, msg, id))
d = self._ensure_startable(d)
d.start()
|
mit
| -1,966,166,402,499,118,800
| 37.531646
| 131
| 0.550996
| false
| 4.154105
| false
| false
| false
|
ImmobilienScout24/cfn-sphere
|
src/main/python/cfn_sphere/template/__init__.py
|
1
|
1485
|
import json
class CloudFormationTemplate(object):
def __init__(self, body_dict, name):
self.name = name
self.template_format_version = body_dict.get('AWSTemplateFormatVersion', '2010-09-09')
self.description = body_dict.get('Description', '')
self.metadata = body_dict.get('Metadata', {})
self.parameters = body_dict.get('Parameters', {})
self.mappings = body_dict.get('Mappings', {})
self.conditions = body_dict.get('Conditions', {})
self.resources = body_dict.get('Resources', {})
self.outputs = body_dict.get('Outputs', {})
self.post_custom_resources = body_dict.get('PostCustomResources', {})
def get_no_echo_parameter_keys(self):
if self.parameters:
return [key for key, value in self.parameters.items() if str(value.get('NoEcho')).lower() == 'true']
else:
return []
def get_template_body_dict(self):
return {
'AWSTemplateFormatVersion': self.template_format_version,
'Description': self.description,
'Parameters': self.parameters,
'Mappings': self.mappings,
'Conditions': self.conditions,
'Resources': self.resources,
'Outputs': self.outputs
}
def get_pretty_template_json(self):
return json.dumps(self.get_template_body_dict(), indent=2)
def get_template_json(self):
return json.dumps(self.get_template_body_dict())
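# Example (illustrative only): wrap an already-parsed template body and dump it back as JSON.
# body = {'Description': 'demo stack', 'Resources': {}}
# template = CloudFormationTemplate(body, 'demo')
# print(template.get_pretty_template_json())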
|
apache-2.0
| -3,528,714,139,985,188,400
| 38.078947
| 112
| 0.607407
| false
| 3.96
| false
| false
| false
|
Mangara/ArboralExplorer
|
lib/Cmpl/cmplServer/cmplServer/CmplGridScheduler.py
|
1
|
24624
|
#***********************************************************************
# This code is part of CmplServer
#
# Copyright (C) 2013, 2014
# Mike Steglich - Technical University of Applied Sciences
# Wildau, Germany
#
# CmplServer is a project of the Technical University of
# Applied Sciences Wildau and the Institute for Operations Research
# and Business Management at the Martin Luther University
# Halle-Wittenberg.
# Please visit the project homepage <www.coliop.org>
#
# CmplServer is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# CmplServer is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
#**********************************************************************
#!/usr/bin/python
from __future__ import division
from pyCmpl.CmplDefs import *
from pyCmpl.CmplTools import *
from CmplServerException import *
from CmplServerTools import *
import xmlrpclib
import thread
import threading
import random
import os
import sys
import subprocess
import socket
import time
#################################################################################
#
# CmplServerHandler
#
#################################################################################
class CmplServerHandler(object):
#*********** constructor **********
def __init__(self, cmplServerUrl , maxProb, performanceIndex ):
self.__cmplServerUrl = cmplServerUrl
self.__cmplServer = None
self.__emptyProblems = maxProb
self.__maxProblems = maxProb
self.__performanceIndex = performanceIndex
self.__lastActivityTime = 0
self.__isActive = True
self.__solvers = []
#*********** end constructor *****
# getter and setter ***************
@property
def cmplServerUrl(self):
return self.__cmplServerUrl
def setCmplServerUrl(self, url):
self.__cmplServerUrl = url
@property
def cmplServer(self):
return self.__cmplServer
def setCmplServer(self, server):
self.__cmplServer = server
@property
def emptyProblems(self):
return self.__emptyProblems
def setEmptyProblems(self, nr):
self.__emptyProblems=nr
def addEmptyProblem(self):
self.__emptyProblems += 1
def removeEmptyProblem(self):
self.__emptyProblems -= 1
@property
def maxProblems(self):
return self.__maxProblems
def setMaxProblems(self, nr):
self.__maxProblems=nr
@property
def performanceIndex(self):
return self.__performanceIndex
def setPerformanceIndex(self, nr):
self.__performanceIndex=nr
@property
def lastActivityTime(self):
return self.__lastActivityTime
def setLastActivityTime(self, timeStamp):
self.__lastActivityTime=timeStamp
@property
def isActive(self):
return self.__isActive
def setActive(self, val):
self.__isActive=val
@property
def solvers(self):
return self.__solvers
def setSolvers(self, solvers):
self.__solvers=solvers
# end getter and setter *************
#################################################################################
# End CmplServerHandler
#################################################################################
#################################################################################
#
# ProblemQueueHandler
#
#################################################################################
class ProblemQueueHandler(object):
#*********** constructor **********
def __init__(self, cmplName , solver):
self.__cmplServerId = None
self.__cmplName = cmplName
self.__status = CMPLGRID_SCHEDULER_UNKNOWN
self.__solver = solver
self.setLastActivityTime(time.time())
#*********** end constructor *****
# getter and setter ***************
@property
def cmplServerId(self):
return self.__cmplServerId
def setCmplServerId(self, id):
self.__cmplServerId = id
@property
def cmplName(self):
return self.__cmplName
def setCmplName(self, name):
self.__cmplName = name
@property
def status(self):
return self.__status
def setStatus(self, status):
self.__status = status
@property
def solver(self):
return self.__solver
@property
def lastActivityTime(self):
return self.__lastActivityTime
def setLastActivityTime(self, timeStamp):
self.__lastActivityTime=timeStamp
# end getter and setter *************
#################################################################################
# End ProblemQueueHandler
#################################################################################
#################################################################################
#
# CmplGridScheduler
#
#################################################################################
class CmplGridScheduler(object):
#****************************************************************************
# Constructor and destructor
#****************************************************************************
#*********** constructor **********
def __init__(self, port = None ):
self.__compatibility = COMPATIBILITY
self.__server = None
self.__serverList = {}
self.__problemQueueInfos = {}
self.__problemQueue = []
self.__maxCmplServerTries = 10
self.__maxInactivityTime=60*60*12 # half a day
self.__schedulerStatus = CMPLGRID_SCHEDULER_OK
self.__schedulerStatusTxt = "CMPLGrid scheduler is running"
self.__solvers = ["cbc","glpk","scip","gurobi","cplex"]
if port == None:
self.__cmplPort = 8008
else:
self.__cmplPort = port
self.__serviceIntervall = 0.1
self.__serviceIntervall2 = 30
self.__serviceThreadHandler = None
self.__cmplServerPath = os.path.expanduser("~") + os.sep+ "CmplServer" +os.sep
self.__logFileName = self.__cmplServerPath + "cmplGridScheduler.log"
if os.path.exists(self.__cmplServerPath) == False:
try:
os.mkdir(self.__cmplServerPath)
except OSError, e:
raise CmplServerException( "Cannot create CmplServer path <"+self.__cmplServerPath+">")
try:
self.__logFile = open(self.__logFileName, "a")
except IOError, e:
raise CmplServerException( "Cannot open CmplServer log file <"+self.__logFileName+"> " + str(e) )
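# The cmplServer.opt file read below consists of plain "key = value" lines; the keys
# recognised here are schedulerServiceIntervall, serviceIntervall, maxInactivityTime and
# maxServerTries. Illustrative example (values are placeholders):
#   schedulerServiceIntervall = 0.1
#   maxInactivityTime = 43200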
try:
try:
self.__optFileName=os.environ['CMPLSERVERPATH']+ os.sep + "cmplServer.opt"
except:
self.__optFileName=os.path.dirname(os.path.abspath(sys.argv[0])) + os.sep + ".." + os.sep + "cmplServer.opt"
f = open(self.__optFileName, "r")
for line in f:
ret=line.split("=")
if ret[0].strip().lower() == "schedulerserviceintervall":
if CmplTools.strIsNumber(ret[1].strip()):
self.__serviceIntervall = float(ret[1].strip())
else:
CmplServerTools.cmplLogging( self.__logFile, "Wrong schedulerServiceIntervall in CmplServer option file <"+str(self.__serviceIntervall)+"> default value is used" )
if ret[0].strip().lower() == "sserviceintervall":
if CmplTools.strIsNumber(ret[1].strip()):
self.__serviceIntervall2 = float(ret[1].strip())
else:
CmplServerTools.cmplLogging( self.__logFile, "Wrong serviceIntervall in CmplServer option file <"+str(self.__serviceIntervall2)+"> default value is used" )
if ret[0].strip().lower() == "maxinactivitytime":
if CmplTools.strIsNumber(ret[1].strip()):
self.__maxInactivityTime = int(ret[1].strip())
continue
else:
CmplServerTools.cmplLogging( self.__logFile, "Wrong option maxInactivityTime in CmplServer option file <"+self.__optFileName+"> default value is used" )
if ret[0].strip().lower() == "maxservertries":
if CmplTools.strIsNumber(ret[1].strip()):
self.__maxCmplServerTries = int(ret[1].strip())
else:
CmplServerTools.cmplLogging( self.__logFile, "Wrong maxServerTries in CmplServer option file <"+str(self.__maxCmplServerTries)+"> default value is used" )
"""if ret[0].strip().lower() == "solvers":
self.__solvers=ret[1].split()"""
f.close()
#undocumented - only used for the pyCmpl test script
if port != None:
self.__cmplPort = port
except IOError, e:
raise CmplServerException( "Cannot read CmplServer option file <"+self.__optFileName+"> " + str(e), self.__logFile )
#*********** end constructor *******
#*********** destructor ************
def __del__(self ):
if self.__serviceThreadHandler!=None:
self.__serviceThreadHandler.kill()
self.__logFile.close()
#*********** end destructor ********
#****************************************************************************
# public methods
#****************************************************************************
#*********** startCmplGridScheduler *************
def startCmplGridScheduler(self):
try:
self.__server = CmplXmlRpcServer(("", self.__cmplPort), logRequests=False)
self.__server.register_function(self.getJobId)
self.__server.register_function(self.knock)
self.__server.register_function(self.cmplServerFailed)
self.__server.register_function(self.getServerId)
self.__server.register_function(self.addEmptyProblem)
self.__server.register_function(self.disconnectServer)
self.__server.register_function(self.disconnectProblem)
self.__server.register_function(self.stopServer)
self.__server.register_function(self.status)
CmplServerTools.cmplLogging( self.__logFile, "CmplGridScheduler has been started | port: " + str(self.__cmplPort) + " | serviceIntervall: " + str(self.__serviceIntervall) )
self.shutdown = False
thread.start_new_thread(self.__serviceThread, () )
while not self.shutdown:
self.__server.handle_request()
except:
print "CmplGridScheduler error: " , str(sys.exc_info()[1])
#*********** end startCmplGridScheduler **********
#*********** getJobId **************
def getJobId(self, cmplName, solver, compatibility=0):
id = "G"+self.__server.client_address[0] + "-" + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime()) + "-" + str(random.randint(100000, 999999))
status = CMPLGRID_SCHEDULER_OK
statusMessage = ""
if int(compatibility)!=self.__compatibility:
status = CMPLSERVER_ERROR
statusMessage = "Incompatible CmplServer client with compatibility stage "+str(compatibility) + " instead of " + str(self.__compatibility)
else:
statusMessage = str(self.__compatibility)
tmpSolver=solver.lower()
if not tmpSolver in self.__solvers:
status=CMPLSERVER_ERROR
statusMessage="Unsupported solver <"+ solver + ">"
if status==CMPLGRID_SCHEDULER_OK:
CmplServerTools.cmplLogging( self.__logFile, "Problem has been registered" , id, cmplName )
if len(self.__problemQueue)>0:
status = CMPLGRID_SCHEDULER_BUSY
self.__problemQueue.append(id)
self.__problemQueueInfos.update( {id : ProblemQueueHandler(cmplName, tmpSolver) } )
CmplServerTools.cmplLogging( self.__logFile, "CmplGrid is busy: Problem is moved to the problem queue. " , id, cmplName)
statusMessage = "CmplGrid is busy: Problem is moved to the problem queue. "
else:
bestServer, status = self.__getBestServer(tmpSolver)
if status==CMPLGRID_SCHEDULER_SOLVER_NOT_AVAILABLE:
status = CMPLSERVER_ERROR
statusMessage = "Solver <"+solver + "> not available in the CmplGrid "
elif status==CMPLGRID_SCHEDULER_BUSY:
self.__problemQueue.append(id)
self.__problemQueueInfos.update( {id : ProblemQueueHandler(cmplName, tmpSolver) } )
self.__problemQueueInfos[id].setStatus(status)
status = CMPLGRID_SCHEDULER_BUSY
CmplServerTools.cmplLogging( self.__logFile, "CmplGrid is busy: Problem is moved to the problem queue. ", id, cmplName)
if self.__nrOfActiveServers()>0:
statusMessage = "CmplGrid is busy: Problem is moved to the problem queue. "
else:
statusMessage = "No server available at the moment in the CMPLGrid: Problem is moved to the problem queue. "
self.__schedulerStatus = CMPLGRID_SCHEDULER_BUSY
self.__schedulerStatusTxt = "CMPLGrid scheduler is busy"
elif status==CMPLGRID_SCHEDULER_OK:
if self.__sendProblemToServer( bestServer, id, cmplName):
statusMessage = self.__serverList[bestServer].cmplServerUrl
return [ status, statusMessage, id]
#*********** end getJobId ************
#*********** knock **************
def knock(self, id):
status=CMPLGRID_SCHEDULER_UNKNOWN
statusMessage=""
serverUrl=""
if id in self.__problemQueueInfos:
status = self.__problemQueueInfos[id].status
serverId = self.__problemQueueInfos[id].cmplServerId
if status == CMPLGRID_SCHEDULER_OK:
if serverId == None:
status = CMPLGRID_SCHEDULER_BUSY
statusMessage = "CmplGrid scheduler is waiting for a free CmplServer"
else:
if self.__checkId(serverId):
statusMessage=self.__serverList[serverId].cmplServerUrl
del self.__problemQueueInfos[id]
else:
status = CMPLGRID_SCHEDULER_ERROR
statusMessage = "...Server isn't connected"
elif status==CMPLGRID_SCHEDULER_PROBLEM_DELETED:
status = CMPLGRID_SCHEDULER_ERROR
statusMessage = "The problem was inactive for too long and was therefore deleted. "
del self.__problemQueueInfos[id]
else:
status = CMPLGRID_SCHEDULER_ERROR
statusMessage = "Problem is not connected to CMPLGrid <"+id+">"
return [status, statusMessage, id ]
#*********** end knock **************
#*********** cmplServerFailed **************
def cmplServerFailed(self, cmplUrl):
status=CMPLGRID_SCHEDULER_WARNING
statusMessage="Unknown CmplServer can't be registered as inactive <"+cmplUrl+">"
for s in self.__serverList:
if self.__serverList[s].cmplServerUrl==cmplUrl:
self.__serverList[s].setActive(False)
status=CMPLGRID_SCHEDULER_OK
statusMessage="CmplServer is now registered as inactive <"+cmplUrl+">"
CmplServerTools.cmplLogging( self.__logFile, statusMessage )
return [status, statusMessage, "" ]
#*********** end cmplServerFailed **************
#*********** getServerId **************
def getServerId(self, port, maxProblems, performanceIndex, solvers, compatibility=0):
tmpUrl = self.__server.client_address[0]+":"+str(port)
id = tmpUrl + "-"+ str(random.randint(100000, 999999))
status = CMPLGRID_SCHEDULER_OK
statusMessage=""
if type(port)!=int:
status= CMPLGRID_SCHEDULER_ERROR
statusMessage = "Wrong CmplServer port " + str(port)
else:
tmpUrl= "http://"+tmpUrl
self.__serverList.update( { id: CmplServerHandler(tmpUrl, int(maxProblems) , int(performanceIndex) ) } )
if int(compatibility)!=self.__compatibility:
status= CMPLGRID_SCHEDULER_ERROR
statusMessage = "Incompatible CmplServer with compatibility stage "+str(compatibility) + " instead of " + str(self.__compatibility)
else:
statusMessage = str(self.__compatibility)
self.__serverList[id].setLastActivityTime(time.time())
self.__serverList[id].setSolvers(solvers)
try:
self.__serverList[id].setCmplServer( xmlrpclib.ServerProxy( self.__serverList[id].cmplServerUrl , allow_none=False) )
except:
CmplServerTools.cmplLogging( self.__logFile, "CmplServer can't connect - no bi-directional connection :"+ str(sys.exc_info()[1]) , id )
status = CMPLGRID_SCHEDULER_ERROR
statusMessage = "CmplServer can't connect - no bi-directional connection: "+ str(sys.exc_info()[1])
if status == CMPLGRID_SCHEDULER_OK:
CmplServerTools.cmplLogging( self.__logFile, "CmplServer has been connected: solver "+str(self.__serverList[id].solvers)+" : maxProblems :" + str(self.__serverList[id].emptyProblems) + ">" , id )
return [ status, statusMessage, id]
#*********** end getServerId ************
#*********** addEmptyProblem **************
def addEmptyProblem(self, serverId):
if self.__checkId(serverId):
self.__serverList[serverId].addEmptyProblem()
status = CMPLGRID_SCHEDULER_OK
statusMessage = "Empty problem has been added"
CmplServerTools.cmplLogging( self.__logFile, "CmplServer has added empty problem " , serverId )
else:
status = CMPLGRID_SCHEDULER_ERROR
statusMessage = "Server isn't connected"
return [status, statusMessage, "" ]
#*********** end addEmptyProblem **************
#*********** disconnectServer **************
def disconnectServer(self, id):
status=None
statusMessage=None
if id in self.__serverList:
del self.__serverList[id]
status = CMPLGRID_SCHEDULER_OK
statusMessage = "CmplServer <" + id +"> disconnected"
CmplServerTools.cmplLogging( self.__logFile, "CmplServer has been disconnected " , id )
else:
status = CMPLGRID_SCHEDULER_WARNING
statusMessage = "CmplServer <" + id +"> wasn't connected"
return [ status, statusMessage, ""]
#*********** end disconnectServer ************
#*********** disconnectProblem **************
def disconnectProblem(self, id):
status=None
statusMessage=None
if id in self.__problemQueue:
del self.__problemQueue[self.__problemQueue.index(id)]
status = CMPLGRID_SCHEDULER_OK
statusMessage = "Problem <" + id +"> disconnected"
CmplServerTools.cmplLogging( self.__logFile, "Problem has been disconnected from problem queue." , id )
else:
status = CMPLGRID_SCHEDULER_WARNING
statusMessage = "Problem <" + id +"> wasn't connected"
return [ status, statusMessage, ""]
#*********** end disconnectProblem ************
#*********** stopServer **************
def stopServer(self):
if self.__server.client_address[0] == "127.0.0.1":
while len( self.__serverList) >0:
id = self.__serverList.keys()[0]
ret=self.__cmplServerExecute(self.__serverList[id].cmplServer, "disconnectFromScheduler", id)
if ret[0]==CMPLSERVER_OK:
self.disconnectServer(id)
else:
CmplServerTools.cmplLogging( self.__logFile, "Can't disconnect CmplServer <" + id +"> : " + ret[1])
self.__serverList.clear()
self.shutdown = True
CmplServerTools.cmplLogging( self.__logFile, "CmplGridScheduler has been stopped" )
return True
else:
return False
#*********** end stopServer **********
#*********** status ***************
def status(self):
#CmplServerTools.cmplLogging( self.__logFile, "Status check: " + str(self.__schedulerStatus) )
return [self.__schedulerStatus, self.__schedulerStatusTxt, ""]
#*********** end status ***********
#****************************************************************************
# private methods
#****************************************************************************
#*********** checkId ***************
def __checkId(self, id) :
return id in self.__serverList
#*********** end checkId ***********
#*********** nrOfActiveServers ***************
def __nrOfActiveServers(self) :
i=0
for s in self.__serverList:
if self.__serverList[s].isActive:
i+=1
return i
#*********** end __nrOfActiveServers ***********
#*********** __serviceThread ******
def __serviceThread(self):
lastActivityTime=time.time()
status = CMPLGRID_SCHEDULER_OK
while True:
ret=[]
if self.shutdown==True:
break
time.sleep(self.__serviceIntervall)
if time.time()-lastActivityTime>self.__serviceIntervall2:
self.__cleanOldProblems()
lastActivityTime=time.time()
if len(self.__problemQueue)>0:
tmpId=self.__problemQueue.pop(0)
bestServer, status = self.__getBestServer(self.__problemQueueInfos[tmpId].solver)
if status==CMPLGRID_SCHEDULER_SOLVER_NOT_AVAILABLE:
self.__problemQueueInfos[tmpId].setStatus(CMPLGRID_SCHEDULER_PROBLEM_DELETED)
status = CMPLSERVER_ERROR
statusMessage = "Solver <"+self.__problemQueueInfos[tmpId].solver + "> not available in the CmplGrid "
elif status==CMPLGRID_SCHEDULER_BUSY:
self.__problemQueue.insert(0,tmpId)
self.__problemQueueInfos[tmpId].setStatus(status)
elif status==CMPLGRID_SCHEDULER_OK:
ans = self.__sendProblemToServer(bestServer, tmpId, self.__problemQueueInfos[tmpId].cmplName)
if ans==True:
self.__problemQueueInfos[tmpId].setStatus(CMPLGRID_SCHEDULER_OK)
self.__problemQueueInfos[tmpId].setCmplServerId(bestServer)
else:
self.__problemQueue.insert(0,tmpId)
self.__problemQueueInfos[tmpId].setStatus(CMPLGRID_SCHEDULER_BUSY)
if len(self.__problemQueue)>0:
self.__schedulerStatus = CMPLGRID_SCHEDULER_BUSY
self.__schedulerStatusTxt = "CMPLGrid scheduler is busy"
else:
self.__schedulerStatus = CMPLGRID_SCHEDULER_OK
self.__schedulerStatusTxt = "CMPLGrid scheduler is running"
for s in self.__serverList:
if self.__serverList[s].isActive==False:
oldMaxTries=self.__maxCmplServerTries
self.__maxCmplServerTries=1
ret=self.__cmplServerExecute(self.__serverList[s].cmplServer, "status")
self.__maxCmplServerTries=oldMaxTries
if ret[0]==CMPLSERVER_OK:
self.__serverList[s].setActive(True)
self.__serverList[s].setEmptyProblems(ret[2])
CmplServerTools.cmplLogging( self.__logFile, "CmplServer is now registered as active <"+self.__serverList[s].cmplServerUrl+"> " + str(self.__serverList[s].emptyProblems) )
#******* end __serviceThread *******
#*********** cleanOldProblems ******
def __cleanOldProblems(self):
delList = []
for id in self.__problemQueue:
if (time.time()-self.__problemQueueInfos[id].lastActivityTime)>self.__maxInactivityTime:
delList.append(id)
for id in delList:
self.__problemQueueInfos[id].setStatus(CMPLGRID_SCHEDULER_PROBLEM_DELETED)
self.__problemQueue.remove(id)
CmplServerTools.cmplLogging( self.__logFile, "Inactive problem has been canceled and deregistered" , id, self.__problemQueueInfos[id].cmplName)
#******* end __cleanOldProblems ******
#*********** cmplServerExecute *******
def __cmplServerExecute(self, server, method="", *args):
ret=[]
tries=0
while True:
try:
if method=="status":
ret = server.status()
if method=="sendJobId":
ret = server.sendJobId(args[0], args[1], args[2])
if method=="disconnectFromScheduler":
ret = server.disconnectFromScheduler(args[0])
except :
tries+=1
if tries==self.__maxCmplServerTries:
ret=[CMPLSERVER_ERROR, str(sys.exc_info()[1]) , ""]
break
else:
continue
break
return ret
#******** end cmplServerExecute *******
#*********** __sendProblemToServer **************
def __sendProblemToServer(self, bestServer, id, cmplName):
ans=True
ret = self.__cmplServerExecute(self.__serverList[bestServer].cmplServer, "sendJobId", cmplName, id, bestServer)
if ret[0] == CMPLSERVER_ERROR:
self.__serverList[bestServer].setActive(False)
ans = False
else:
CmplServerTools.cmplLogging( self.__logFile, "Problem has been sent to CmplServer <"+ bestServer +"> " , id, cmplName )
ans=True
return ans
#*********** end __sendProblemToServer ************
#*********** getBestServer **************
def __getBestServer(self, solver):
bestServer=None
status=CMPLGRID_SCHEDULER_OK
bestFactor=0
activeServerFound=False
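# Pick the active server that supports the requested solver and currently offers the
# largest share of free problem slots, weighted by its performance index.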
for s in self.__serverList:
if self.__serverList[s].isActive:
if solver in self.__serverList[s].solvers:
activeServerFound=True
tmpFactor=(self.__serverList[s].emptyProblems/self.__serverList[s].maxProblems * self.__serverList[s].performanceIndex)
if tmpFactor > bestFactor:
bestFactor = tmpFactor
bestServer = s
if bestServer!=None:
self.__serverList[bestServer].removeEmptyProblem()
else:
if not activeServerFound:
status=CMPLGRID_SCHEDULER_SOLVER_NOT_AVAILABLE
else:
status=CMPLGRID_SCHEDULER_BUSY
return (bestServer, status)
#*********** end getBestServer ************
#################################################################################
# End CmplGridScheduler
#################################################################################
|
apache-2.0
| 5,686,667,388,055,705,000
| 30.897668
| 201
| 0.610867
| false
| 3.447774
| false
| false
| false
|
nycz/gimptools
|
NyczAddTextOutline.py
|
1
|
1238
|
#!/usr/bin/env python2
from gimpfu import *
## WORKFLOW
#
# Right-click on layer -> alpha to selection
# Grow selection by 1 pixel
# Make a new empty layer
# Fill selection with black
# Move new layer below old layer
# Merge down old layer
def add_text_outline(image, layer):
gimp.pdb.gimp_image_undo_group_start(image)
layer_name = layer.name
gimp.pdb.gimp_image_select_item(image, CHANNEL_OP_ADD, layer)
if gimp.pdb.gimp_selection_is_empty(image):
gimp.pdb.gimp_image_undo_group_end(image)
return
gimp.pdb.gimp_selection_grow(image, 1)
new_layer = gimp.Layer(image, 'outline', image.width, image.height, RGBA_IMAGE, 100, NORMAL_MODE)
top_pos = image.layers.index(layer)
image.add_layer(new_layer, top_pos+1)
gimp.pdb.gimp_edit_fill(new_layer, BACKGROUND_FILL)
gimp.pdb.gimp_selection_none(image)
final_layer = gimp.pdb.gimp_image_merge_down(image, layer, NORMAL_MODE)
final_layer.name = layer_name
gimp.pdb.gimp_image_undo_group_end(image)
return
register(
"nycz_add_text_outline",
"Add black outline to a text layer",
"Add black outline to a text layer",
"Nycz",
"Nycz",
"August 2015",
"<Image>/Nycz/Outline text",
"RGBA*",
[],
[],
add_text_outline,
)
main()
|
mit
| -366,173,237,773,243,100
| 25.913043
| 101
| 0.668821
| false
| 2.954654
| false
| false
| false
|
iLoop2/ResInsight
|
ThirdParty/Ert/devel/python/python/ert/test/source_enumerator.py
|
1
|
2491
|
import os
import re
class SourceEnumerator(object):
@classmethod
def findDevRoot(cls, root_directory_name = "devel", verbose=False):
dev_root = os.path.dirname(os.path.realpath(__file__))
while True:
if verbose:
print("Looking at: %s" % dev_root)
dev_path = os.path.join(dev_root , root_directory_name)
if os.path.exists( dev_path ):
dev_root = os.path.join(dev_root , root_directory_name)
if verbose:
print("break: %s" % dev_path)
break
head, tail = os.path.split(dev_root)
dev_root = head
if tail == "":
raise ValueError("Source root: '%s' not found!" % root_directory_name)
if verbose:
print("Returning: %s " % dev_root)
return dev_root
@classmethod
def findSourceFile(cls, path):
dev_root = SourceEnumerator.findDevRoot()
source_file = os.path.join(dev_root, path)
if not os.path.exists(source_file):
raise ValueError("File not found: %s:%s" % (path , source_file))
return source_file
@classmethod
def removeComments(cls, code_string):
code_string = re.sub(re.compile("/\*.*?\*/",re.DOTALL ) ,"" ,code_string) # remove all occurrences of stream comments (/* COMMENT */) from the string
code_string = re.sub(re.compile("//.*?\n" ) ,"" ,code_string) # remove all occurrences of single-line comments (// COMMENT\n) from the string
return code_string
@classmethod
def findEnum(cls, enum_name, full_source_file_path):
with open(full_source_file_path, "r") as f:
text = f.read()
text = SourceEnumerator.removeComments(text)
enum_pattern = re.compile("typedef\s+enum\s+\{(.*?)\}\s*(\w+?);", re.DOTALL)
for enum in enum_pattern.findall(text):
if enum[1] == enum_name:
return enum[0]
raise ValueError("Enum with name: '%s' not found!" % enum_name)
@classmethod
def findEnumerators(cls, enum_name, source_file_path):
enum_text = SourceEnumerator.findEnum(enum_name, SourceEnumerator.findSourceFile(source_file_path))
enumerator_pattern = re.compile("(\w+?)\s*?=\s*?(\d+)")
enumerators = []
for enumerator in enumerator_pattern.findall(enum_text):
enumerators.append((enumerator[0], int(enumerator[1])))
return enumerators
|
gpl-3.0
| -6,384,688,368,757,242,000
| 33.123288
| 149
| 0.577278
| false
| 3.729042
| false
| false
| false
|
cskyan/chmannot
|
bin/chm_gendata.py
|
1
|
14312
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
###########################################################################
# Copyright (C) 2013-2016 by Caspar. All rights reserved.
# File Name: chm_gendata.py
# Author: Shankai Yan
# E-mail: sk.yan@my.cityu.edu.hk
# Created Time: 2016-03-01 22:15:59
###########################################################################
#
import os
import sys
import logging
import ast
from optparse import OptionParser
import numpy as np
import scipy as sp
import pandas as pd
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer, MinMaxScaler
from sklearn.decomposition import LatentDirichletAllocation, NMF, TruncatedSVD
import bionlp.spider.pubmed as pm
import bionlp.spider.metamap as mm
from bionlp import ftslct, ftdecomp
from bionlp.util import fs, io, sampling
import hoc
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
PAR_DIR = os.path.abspath(os.path.join(FILE_DIR, os.path.pardir))
CONFIG_FILE = os.path.join(PAR_DIR, 'etc', 'config.yaml')
SPDR_MAP = {'hoc':hoc, 'pbmd':pm}
SC=';;'
opts, args = {}, []
cfgr = None
spdr = pm
def gen_data():
if (opts.local):
X, Y = spdr.get_data(None, from_file=True)
else:
pmid_list = spdr.get_pmids()
articles = spdr.fetch_artcls(pmid_list)
X, Y = spdr.get_data(articles, ft_type=opts.type, max_df=ast.literal_eval(opts.maxdf), min_df=ast.literal_eval(opts.mindf), fmt=opts.fmt, spfmt=opts.spfmt)
hallmarks = Y.columns
# Feature Selection
# mt = sp.sparse.coo_matrix(X)
# mask_mt = np.zeros(mt.shape)
# mask_mt[mt.row, mt.col] = 1
# stat = mask_mt.sum(axis=0)
# cln_X = X.iloc[:,np.arange(stat.shape[0])[stat>ast.literal_eval(opts.thrshd) * (stat.max() - stat.min()) + stat.min()]]
# Document Frequence
# stat, _ = ftslct.freqs(X.values, Y.values)
# Mutual information
# stat, _ = ftslct.mutual_info(X.values, Y.values)
# Information gain
# stat, _ = ftslct.info_gain(X.values, Y.values)
# GSS coefficient
# stat, _ = ftslct.gss_coef(X.values, Y.values)
# NGL coefficient
# stat, _ = ftslct.ngl_coef(X.values, Y.values)
# Odds ratio
# stat, _ = ftslct.odds_ratio(X.values, Y.values)
# Fisher criterion
# stat, _ = ftslct.fisher_crtrn(X.values, Y.values)
# GU metric
# stat, _ = ftslct.gu_metric(X.values, Y.values)
# Decision tree
# stat, _ = ftslct.decision_tree(X.values, Y.values)
# Combined feature
stat, _ = ftslct.utopk(X.values, Y.values, ftslct.decision_tree, fn=100)
io.write_npz(stat, os.path.join(spdr.DATA_PATH, 'ftw.npz'))
# cln_X = X.iloc[:,np.arange(stat.shape[0])[stat>stat.min()]]
cln_X = X.iloc[:,stat.argsort()[-500:][::-1]]
print 'The size of data has been changed from %s to %s.' % (X.shape, cln_X.shape)
if (opts.fmt == 'npz'):
io.write_df(cln_X, os.path.join(spdr.DATA_PATH, 'cln_X.npz'), with_idx=True, sparse_fmt=opts.spfmt, compress=True)
else:
cln_X.to_csv(os.path.join(spdr.DATA_PATH, 'cln_X.csv'), encoding='utf8')
del X, cln_X
for i in range(Y.shape[1]):
y = Y.iloc[:,i]
if (opts.fmt == 'npz'):
io.write_df(y, os.path.join(spdr.DATA_PATH, 'y_%s.npz' % i), with_col=False, with_idx=True)
else:
y.to_csv(os.path.join(spdr.DATA_PATH, 'y_%s.csv' % i), encoding='utf8')
def samp_data(sp_size = 0.3):
pid = opts.pid
if (pid != None):
iter_size = 30
X_iter, labels= spdr.get_feats_iter('y_%s.csv' % pid, iter_size)
new_X, new_y = sampling.samp_df_iter(X_iter, iter_size, labels, sp_size)
new_X.to_csv(os.path.join(spdr.DATA_PATH, 'samp_X_%i.csv' % pid), encoding='utf8')
new_y.to_csv(os.path.join(spdr.DATA_PATH, 'samp_y_%s.csv' % pid), encoding='utf8')
else:
for i in range(10):
iter_size = 30
X_iter, labels= spdr.get_feats_iter('y_%s.csv' % i, iter_size)
new_X, new_y = sampling.samp_df_iter(X_iter, iter_size, labels, sp_size)
new_X.to_csv(os.path.join(spdr.DATA_PATH, 'samp_X_%i.csv' % i), encoding='utf8')
new_y.to_csv(os.path.join(spdr.DATA_PATH, 'samp_y_%s.csv' % i), encoding='utf8')
def extend_mesh(ft_type='binary'):
X, Y = spdr.get_data(None, ft_type=opts.type, max_df=ast.literal_eval(opts.maxdf), min_df=ast.literal_eval(opts.mindf), from_file=True, fmt=opts.fmt, spfmt=opts.spfmt)
mesh_df = mm.mesh_countvec(X.index)
mesh_df.columns = ['extmesh_' + x for x in mesh_df.columns]
new_X = pd.concat([X, mesh_df], axis=1, join_axes=[X.index])
print 'The size of data has been changed from %s to %s.' % (X.shape, new_X.shape)
if (opts.fmt == 'npz'):
io.write_df(new_X, os.path.join(spdr.DATA_PATH, 'extmesh_X.npz'), with_idx=True, sparse_fmt=opts.spfmt, compress=True)
else:
new_X.to_csv(os.path.join(spdr.DATA_PATH, 'extmesh_X.csv'), encoding='utf8')
def expand_data(ft_type='binary', db_name='mesh2016', db_type='LevelDB', store_path='store'):
from rdflib import Graph
from bionlp.util import ontology
X, Y = spdr.get_data(None, ft_type=opts.type, max_df=ast.literal_eval(opts.maxdf), min_df=ast.literal_eval(opts.mindf), from_file=True, fmt=opts.fmt, spfmt=opts.spfmt)
mesh_cols = filter(lambda x: x.startswith('mesh_') or x.startswith('extmesh_'), X.columns)
mesh_X = X.loc[:,mesh_cols]
exp_meshx = set([])
ext_meshx_dict = {}
g = Graph(store=db_type, identifier=db_name)
g.open(store_path)
for col in mesh_X.columns:
mesh_lb = col.strip('extmesh_').strip('mesh_').replace('"', '\\"')
# Get similar MeSH terms
em_set = set(ontology.slct_sim_terms(g, mesh_lb, prdns=[('meshv',ontology.MESHV)], eqprds=ontology.MESH_EQPRDC_MAP))
# Overall extended MeSH terms
exp_meshx |= em_set
# Extended MeSH terms per column
ext_meshx_dict[col] = em_set
g.close()
exp_mesh_X = pd.DataFrame(np.zeros((mesh_X.shape[0], len(exp_meshx)), dtype='int8'), index=X.index, columns=['expmesh_%s' % w for w in exp_meshx])
# Append the similar MeSH terms of each column to the final matrix
for col, sim_mesh in ext_meshx_dict.iteritems():
if (len(sim_mesh) == 0): continue
sim_cols = ['expmesh_%s' % w for w in sim_mesh]
if (ft_type == 'binary'):
exp_mesh_X.loc[:,sim_cols] = np.logical_or(exp_mesh_X.loc[:,sim_cols], mesh_X.loc[:,col].reshape((-1,1))).astype('int')
elif (ft_type == 'numeric'):
exp_mesh_X.loc[:,sim_cols] += mesh_X.loc[:,col].reshape((-1,1))
elif (ft_type == 'tfidf'):
pass
new_X = pd.concat([X, exp_mesh_X], axis=1, join_axes=[X.index])
print 'The size of data has been changed from %s to %s.' % (X.shape, new_X.shape)
if (opts.fmt == 'npz'):
io.write_df(new_X, os.path.join(spdr.DATA_PATH, 'exp_X.npz'), with_idx=True, sparse_fmt=opts.spfmt, compress=True)
else:
new_X.to_csv(os.path.join(spdr.DATA_PATH, 'exp_X.csv'), encoding='utf8')
def decomp_data(method='LDA', n_components=100):
X, Y = spdr.get_data(None, ft_type=opts.type, max_df=ast.literal_eval(opts.maxdf), min_df=ast.literal_eval(opts.mindf), from_file=True, fmt=opts.fmt, spfmt=opts.spfmt)
method = method.upper()
n_components = min(n_components, X.shape[1])
if (method == 'LDA'):
model = make_pipeline(LatentDirichletAllocation(n_topics=n_components, learning_method='online', learning_offset=50., max_iter=5, n_jobs=opts.np, random_state=0), Normalizer(copy=False))
elif (method == 'NMF'):
model = make_pipeline(NMF(n_components=n_components, random_state=0, alpha=.1, l1_ratio=.5), Normalizer(copy=False))
elif (method == 'LSI'):
model = make_pipeline(TruncatedSVD(n_components), Normalizer(copy=False))
elif (method == 'TSNE'):
model = make_pipeline(ftdecomp.DecompTransformer(n_components, ftdecomp.t_sne, initial_dims=15*n_components, perplexity=30.0))
if (opts.prefix == 'all'):
td_cols = X.columns
else:
# Only apply dimension reduction on specific columns
td_cols = np.array(map(lambda x: True if any(x.startswith(prefix) for prefix in opts.prefix.split(SC)) else False, X.columns))
td_X = X.loc[:,td_cols]
new_td_X = model.fit_transform(td_X.as_matrix())
if (opts.prefix == 'all'):
columns = range(new_td_X.shape[1]) if not hasattr(model.steps[0][1], 'components_') else td_X.columns[model.steps[0][1].components_.argmax(axis=1)]
new_X = pd.DataFrame(new_td_X, index=X.index, columns=['tp_%s' % x for x in columns])
else:
columns = range(new_td_X.shape[1]) if not hasattr(model.steps[0][1], 'components_') else td_X.columns[model.steps[0][1].components_.argmax(axis=1)]
# Concatenate the components with the columns that dimension reduction was not applied to
new_X = pd.concat([pd.DataFrame(new_td_X, index=X.index, columns=['tp_%s' % x for x in columns]), X.loc[:,np.logical_not(td_cols)]], axis=1)
if (opts.fmt == 'npz'):
io.write_df(new_X, os.path.join(spdr.DATA_PATH, '%s%i_X.npz' % (method.lower(), n_components)), with_idx=True, sparse_fmt=opts.spfmt, compress=True)
else:
new_X.to_csv(os.path.join(spdr.DATA_PATH, '%s%i_X.csv' % (method.lower(), n_components)), encoding='utf8')
def add_d2v(n_components=100, win_size=8, min_t=5, mdl_fname='d2v.mdl'):
from gensim.parsing.preprocessing import preprocess_string
from gensim.models.doc2vec import TaggedDocument, Doc2Vec
def read_files(fpaths, code='ascii'):
for fpath in fpaths:
try:
yield TaggedDocument(words=preprocess_string('\n'.join(fs.read_file(fpath, code))), tags=[os.path.splitext(os.path.basename(fpath))[0]])
except Exception as e:
continue
def read_prcsed_files(fpaths, code='ascii'):
for fpath in fpaths:
try:
words = []
for line in fs.read_file(fpath, code):
if (line == '~~~'):
continue
if (line == '. . .' or line == '~~~ ~~~' or line == ', , ,'):
continue
items = line.split()
if (len(items) < 3): # Skip the unrecognized words
continue
words.append(items[2].lower())
yield TaggedDocument(words=words, tags=[os.path.splitext(os.path.basename(fpath))[0]])
except Exception as e:
continue
mdl_fpath = os.path.join(spdr.DATA_PATH, mdl_fname)
if (os.path.exists(mdl_fpath)):
model = Doc2Vec.load(mdl_fpath)
else:
# model = Doc2Vec(read_files(fs.listf(spdr.ABS_PATH, full_path=True)), size=n_components, window=8, min_count=5, workers=opts.np)
model = Doc2Vec(read_prcsed_files(fs.listf(os.path.join(spdr.DATA_PATH, 'lem'), full_path=True)), size=n_components, window=8, min_count=5, workers=opts.np)
model.save(os.path.join(spdr.DATA_PATH, mdl_fname))
X, Y = spdr.get_data(None, ft_type=opts.type, max_df=ast.literal_eval(opts.maxdf), min_df=ast.literal_eval(opts.mindf), from_file=True, fmt=opts.fmt, spfmt=opts.spfmt)
# Map the index of original matrix to that of the paragraph vectors
d2v_idx = [model.docvecs.index_to_doctag(i).rstrip('.lem') for i in range(model.docvecs.count)]
mms = MinMaxScaler()
d2v_X = pd.DataFrame(mms.fit_transform(model.docvecs[range(model.docvecs.count)]), index=d2v_idx, columns=['d2v_%i' % i for i in range(model.docvecs[0].shape[0])])
# d2v_X = pd.DataFrame(model.docvecs[range(model.docvecs.count)], index=d2v_idx, columns=['d2v_%i' % i for i in range(model.docvecs[0].shape[0])])
new_X = pd.concat([X, d2v_X], axis=1, join_axes=[X.index])
print 'The size of data has been changed from %s to %s.' % (X.shape, new_X.shape)
if (opts.fmt == 'npz'):
io.write_df(d2v_X, os.path.join(spdr.DATA_PATH, 'd2v_X.npz'), with_idx=True, sparse_fmt=opts.spfmt, compress=True)
io.write_df(new_X, os.path.join(spdr.DATA_PATH, 'cmb_d2v_X.npz'), with_idx=True, sparse_fmt=opts.spfmt, compress=True)
else:
d2v_X.to_csv(os.path.join(spdr.DATA_PATH, 'd2v_X.csv'), encoding='utf8')
new_X.to_csv(os.path.join(spdr.DATA_PATH, 'cmb_d2v_X.csv'), encoding='utf8')
def main():
if (opts.method is None):
return
elif (opts.method == 'gen'):
gen_data()
elif (opts.method == 'samp'):
samp_data()
elif (opts.method == 'extend'):
extend_mesh()
elif (opts.method == 'expand'):
expand_data(store_path=os.path.join(spdr.DATA_PATH, 'store'))
elif (opts.method == 'decomp'):
decomp_data(method=opts.decomp.upper(), n_components=opts.cmpn)
elif (opts.method == 'd2v'):
add_d2v(n_components=opts.cmpn)
if __name__ == '__main__':
# Logging setting
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
# Parse commandline arguments
op = OptionParser()
op.add_option('-p', '--pid', action='store', type='int', dest='pid', help='indicate the process ID')
op.add_option('-n', '--np', default=-1, action='store', type='int', dest='np', help='indicate the number of processes used for training')
op.add_option('-f', '--fmt', default='npz', help='data stored format: csv or npz [default: %default]')
op.add_option('-s', '--spfmt', default='csr', help='sparse data stored format: csr or csc [default: %default]')
op.add_option('-l', '--local', default=False, action='store_true', dest='local', help='read data from the preprocessed data matrix file')
op.add_option('-t', '--type', default='binary', help='feature type: binary, numeric, tfidf [default: %default]')
op.add_option('-a', '--mindf', default='1', type='str', dest='mindf', help='lower document frequency threshold for term ignorance')
op.add_option('-b', '--maxdf', default='1.0', type='str', dest='maxdf', help='upper document frequency threshold for term ignorance')
op.add_option('-r', '--thrshd', default='0.05', type='str', dest='thrshd', help='feature frequency threshold for filtering')
op.add_option('-d', '--decomp', default='LDA', help='decomposition method to use: LDA, NMF, LSI or TSNE [default: %default]')
op.add_option('-c', '--cmpn', default=100, type='int', dest='cmpn', help='number of components that used in clustering model')
op.add_option('-j', '--prefix', default='all', type='str', dest='prefix', help='prefixes of the column names that the decomposition method acts on, for example, \'-j lem;;nn;;ner\' means columns that starts with \'lem_\', \'nn_\', or \'ner_\'')
op.add_option('-i', '--input', default='hoc', help='input source: hoc or pbmd [default: %default]')
op.add_option('-m', '--method', help='main method to run')
(opts, args) = op.parse_args()
if len(args) > 0:
op.print_help()
op.error('Please input options instead of arguments.')
sys.exit(1)
spdr = SPDR_MAP[opts.input]
# Parse config file
if (os.path.exists(CONFIG_FILE)):
cfgr = io.cfg_reader(CONFIG_FILE)
spdr_cfg = cfgr('bionlp.spider.%s' % opts.input, 'init')
if (len(spdr_cfg) > 0 and spdr_cfg['DATA_PATH'] is not None and os.path.exists(spdr_cfg['DATA_PATH'])):
spdr.DATA_PATH = spdr_cfg['DATA_PATH']
main()
|
apache-2.0
| -7,199,348,447,836,130,000
| 46.71
| 245
| 0.666015
| false
| 2.603128
| true
| false
| false
|
gawel/panoramisk
|
examples/fast_agi_server_ivr.py
|
1
|
1407
|
from pprint import pprint
import asyncio
from panoramisk import fast_agi
loop = asyncio.get_event_loop()
async def call_waiting(request):
pprint(['AGI variables:', request.headers])
pprint((await request.send_command('ANSWER')))
pprint((await request.send_command('SAY DIGITS 1 \"\"')))
# To Raise a 510 error - 510 Invalid or unknown command
pprint((await request.send_command('INVALID-COMMAND')))
# To Raise a 520 error - 520-Invalid command syntax. Proper usage follows:
pprint((await request.send_command('SAY PHONETIC Hello world .')))
pprint((await request.send_command('SAY NUMBER 100 \"\"')))
pprint((await request.send_command('GET DATA hello-world 5000 2')))
pprint((await request.send_command('EXEC StartMusicOnHold')))
pprint((await request.send_command('EXEC Wait 30')))
def main():
fa_app = fast_agi.Application(loop=loop)
fa_app.add_route('call_waiting', call_waiting)
coro = asyncio.start_server(fa_app.handler, '0.0.0.0', 4574, loop=loop)
server = loop.run_until_complete(coro)
# Serve requests until CTRL+c is pressed
print('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
if __name__ == '__main__':
main()
|
mit
| -8,811,220,548,279,284,000
| 28.93617
| 78
| 0.673063
| false
| 3.562025
| false
| false
| false
|
cdman/hnarchive
|
app.py
|
1
|
8451
|
import datetime
import logging
import re
import webapp2
from google.appengine.ext import ndb
from google.appengine.api import urlfetch
import bs4
class Node(ndb.Model):
parent = ndb.KeyProperty('Node', indexed=False)
title = ndb.StringProperty(indexed=False)
url = ndb.TextProperty(indexed=False)
user = ndb.StringProperty(indexed=False)
body = ndb.TextProperty(indexed=False)
score = ndb.IntegerProperty(indexed=False)
comment_count = ndb.IntegerProperty(indexed=False)
added_at = ndb.DateTimeProperty(indexed=False)
retrieved_at = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
class PendingNode(ndb.Model):
added_at = ndb.DateTimeProperty(auto_now_add=True, indexed=True)
class MinMax(ndb.Model):
INSTANCE_KEY = ndb.Key('MinMax', 1)
low_bound = ndb.IntegerProperty(default=0, indexed=False)
upper_bound = ndb.IntegerProperty(default=1, indexed=False)
processed_nodes = ndb.IntegerProperty(default=0, indexed=False)
class Webpage(ndb.Model):
url = ndb.StringProperty(indexed=False)
fetched_at = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
html = ndb.BlobProperty(indexed=False, compressed=True)
def get(url):
assert url.startswith('https://news.ycombinator.com/')
result = urlfetch.fetch(url=url,
headers={'User-Agent': 'HNArchive - dify.ltd@gmail.com / https://github.com/cdman/hnarchive'})
logging.info('Retrieved %s', url)
assert result.status_code == 200
assert 'Hacker News' in result.content
ndb.non_transactional(Webpage(url=url, html=result.content).put)()
return bs4.BeautifulSoup(result.content, 'lxml')
@ndb.non_transactional(allow_existing=True)
def skipExisting(ids):
nodes = ndb.get_multi([ndb.Key(Node, i) for i in ids if i > 0])
keys = set([0] + [n.key.id() for n in nodes if n])
return [i for i in ids if not i in keys]
def extractUniqueIds(page):
return set([
long(re.sub(r'.*?(\d+)', r'\1', link['href']))
for link in page.find_all(href=re.compile(r'item\?id=\d+'))
])
@ndb.transactional(xg=True, propagation=ndb.TransactionOptions.INDEPENDENT)
def fetchListing(url):
listing = get(url)
minMax = MinMax.INSTANCE_KEY.get()
if not minMax:
minMax = MinMax(key=MinMax.INSTANCE_KEY)
ids = extractUniqueIds(listing)
new_ids = skipExisting(ids)
if not new_ids:
logging.info('No new ids found')
return
if max(new_ids) > minMax.upper_bound:
minMax.upper_bound = max(new_ids)
minMax.put()
logging.info('New upper bound: %d', max(new_ids))
ndb.non_transactional(ndb.put_multi)([
PendingNode(key=ndb.Key(PendingNode, i)) for i in new_ids])
logging.info('Discovered new nodes: %s', new_ids)
def fetchFrontPage():
fetchListing('https://news.ycombinator.com/')
def fetchNewest():
fetchListing('https://news.ycombinator.com/newest')
@ndb.transactional(xg=True, propagation=ndb.TransactionOptions.INDEPENDENT)
def fetchMin():
minMax = MinMax.INSTANCE_KEY.get()
if not minMax:
minMax = MinMax(key=MinMax.INSTANCE_KEY)
while True:
minMax.low_bound += 1
if minMax.low_bound >= minMax.upper_bound:
return
if ndb.non_transactional(ndb.Key(Node, minMax.low_bound).get)() is None:
break
ndb.put_multi([minMax, PendingNode(key=ndb.Key(PendingNode, minMax.low_bound))])
def extractMatch(text, pattern):
match = re.search(pattern, text)
if match is None: return
return match.group(1)
def populateFromMeta(node, meta, parent_id):
meta_text = meta.text
node.user = meta.find(href=re.compile(r'^user\?id=.+'))['href'].replace('user?id=', '')
node.key = ndb.Key(Node, long(
meta.find(href=re.compile(r'^item\?id=.+'))['href'].replace('item?id=', '')))
if extractMatch(meta_text, r'(\d+) points?'):
node.score = long(extractMatch(meta_text, r'(\d+) points?'))
if extractMatch(meta_text, r'(\d+) (?:minute|hour|day)s? ago'):
qty = long(extractMatch(meta_text, r'(\d+) (?:minute|hour|day)s? ago'))
metric = extractMatch(meta_text, r'\d+ (minute|hour|day)s? ago')
node.added_at = datetime.datetime.utcnow()
if metric == 'minute':
node.added_at -= datetime.timedelta(minutes=qty)
elif metric == 'hour':
node.added_at -= datetime.timedelta(hours=qty)
elif metric == 'day':
node.added_at -= datetime.timedelta(days=qty)
else:
assert False
if extractMatch(meta_text, r'(\d+) comments?'):
node.comment_count = long(extractMatch(meta_text, r'(\d+) comments?'))
parent = meta.find('a', text='parent')
if parent:
node.parent = ndb.Key(Node, long(parent['href'].replace('item?id=', '')))
else:
node.parent = ndb.Key(Node, parent_id)
@ndb.non_transactional
def parseTable(t, parent_id):
head = t.find('td', class_='title')
ids = []
if head is not None:
node = Node()
node.title = head.text
node.url = head.find('a')['href']
populateFromMeta(node, head.parent.parent.find_all('tr')[1], parent_id)
text = ''.join([unicode(n) for n in head.parent.parent.find_all('tr')[2:] if n.text.strip()])
text, _ = re.subn(r'</?t[dr]>', '', text)
if text:
node.body = text
node.put()
ids.append(node.key.id())
logging.info('Saved %d', node.key.id())
for comment in t.find_all('td', class_='default'):
parent_table = comment
while parent_table and parent_table.name != 'table':
parent_table = parent_table.parent
if parent_table and parent_table.find('a', text='link'):
pparent_id = long(parent_table.find('a', text='link')['href'].replace('item?id=', ''))
else:
pparent_id = parent_id
node = Node()
populateFromMeta(node, comment.find('span', class_='comhead'), pparent_id)
node.body = ''.join(
[unicode(c).strip() for c in comment.find('span', class_='comment').contents])
node.body = node.body.replace('<font color="#000000">', '').replace('</font>', '')
node.put()
ids.append(node.key.id())
logging.info('Saved %d', node.key.id())
return ids
@ndb.transactional(xg=True, propagation=ndb.TransactionOptions.INDEPENDENT)
def processsOneNode(pending_node):
page = get('https://news.ycombinator.com/item?id=%d' % pending_node.id())
ids = extractUniqueIds(page)
node_count = 0
for t in page.find_all('table'):
try:
table_ids = parseTable(t, pending_node.id())
ids -= set(table_ids)
node_count += len(table_ids)
except Exception:
logging.exception('Parsing failed')
new_ids = skipExisting(ids)
ndb.non_transactional(ndb.put_multi)([
PendingNode(key=ndb.Key(PendingNode, i)) for i in new_ids])
logging.info('Discovered new nodes: %s', new_ids)
pending_node.delete()
logging.info('Processed %d', pending_node.id())
minMax = MinMax.INSTANCE_KEY.get()
if not minMax:
minMax = MinMax(key=MinMax.INSTANCE_KEY)
minMax.processed_nodes += node_count
minMax.put()
@ndb.non_transactional
def fetchNode():
pending_node = PendingNode.query().order(PendingNode.added_at).fetch(1, keys_only=True)
if len(pending_node) == 0: return
pending_node = pending_node[0]
processsOneNode(pending_node)
class CrawlingPhase(ndb.Model):
INSTANCE_KEY = ndb.Key('CrawlingPhase', 1)
_STEPS = [fetchFrontPage, fetchNewest, fetchMin] + [fetchNode for _ in xrange(0, 7)]
state = ndb.IntegerProperty(default=0, indexed=False)
@staticmethod
@ndb.transactional(xg=True, propagation=ndb.TransactionOptions.INDEPENDENT)
def runNext():
instance = CrawlingPhase.INSTANCE_KEY.get()
if not instance:
instance = CrawlingPhase(key=CrawlingPhase.INSTANCE_KEY)
step = CrawlingPhase._STEPS[instance.state]
instance.state = (instance.state + 1) % len(CrawlingPhase._STEPS)
instance.put()
try:
step()
except Exception:
logging.exception('Step %s failed', step)
class Crawler(webapp2.RequestHandler):
def get(self):
CrawlingPhase.runNext()
self.response.write('Done')
app = webapp2.WSGIApplication([
('/task/crawl', Crawler),
])
|
gpl-3.0
| 3,507,052,515,939,519,000
| 33.076613
| 102
| 0.636611
| false
| 3.344282
| false
| false
| false
|
jdemon519/cfme_tests
|
sprout/appliances/tasks.py
|
1
|
83964
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import diaper
import fauxfactory
import hashlib
import iso8601
import random
import re
import command
import yaml
from contextlib import closing
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from celery import chain, chord, shared_task
from celery.exceptions import MaxRetriesExceededError
from datetime import datetime, timedelta
from functools import wraps
from lxml import etree
from novaclient.exceptions import OverLimit as OSOverLimit
from paramiko import SSHException
from urllib2 import urlopen, HTTPError
import socket
from appliances.models import (
Provider, Group, Template, Appliance, AppliancePool, DelayedProvisionTask,
MismatchVersionMailer, User, GroupShepherd)
from sprout import settings, redis
from sprout.irc_bot import send_message
from sprout.log import create_logger
from utils import conf
from utils.appliance import Appliance as CFMEAppliance
from utils.path import project_path
from utils.providers import get_mgmt
from utils.timeutil import parsetime
from utils.trackerbot import api, depaginate, parse_template
from utils.version import Version
from utils.wait import wait_for
LOCK_EXPIRE = 60 * 15 # 15 minutes
VERSION_REGEXPS = [
r"^cfme-(\d)(\d)(\d)(\d)(\d{2})", # 1.2.3.4.11
# newer format
r"cfme-(\d)(\d)(\d)[.](\d{2})-", # cfme-524.02- -> 5.2.4.2
r"cfme-(\d)(\d)(\d)[.](\d{2})[.](\d)-", # cfme-524.02.1- -> 5.2.4.2.1
# 4 digits
r"cfme-(?:nightly-)?(\d)(\d)(\d)(\d)-", # cfme-5242- -> 5.2.4.2
r"cfme-(\d)(\d)(\d)-(\d)-", # cfme-520-1- -> 5.2.0.1
# 5 digits (not very intelligent but no better solution so far)
r"cfme-(?:nightly-)?(\d)(\d)(\d)(\d{2})-", # cfme-53111- -> 5.3.1.11, cfme-53101 -> 5.3.1.1
]
VERSION_REGEXPS = map(re.compile, VERSION_REGEXPS)
VERSION_REGEXP_UPSTREAM = re.compile(r'^miq-stable-([^-]+)-')
TRACKERBOT_PAGINATE = 20
def retrieve_cfme_appliance_version(template_name):
"""If possible, retrieve the appliance's version from template's name."""
for regexp in VERSION_REGEXPS:
match = regexp.search(template_name)
if match is not None:
return ".".join(map(str, map(int, match.groups())))
else:
match = VERSION_REGEXP_UPSTREAM.search(template_name)
if match is not None:
return match.groups()[0]
def trackerbot():
return api()
def none_dict(l):
""""If the parameter passed is None, returns empty dict. Otherwise it passes through"""
if l is None:
return {}
else:
return l
def provider_error_logger():
return create_logger("provider_errors")
def logged_task(*args, **kwargs):
kwargs["bind"] = True
def f(task):
@wraps(task)
def wrapped_task(self, *args, **kwargs):
self.logger = create_logger(task)
try:
return task(self, *args, **kwargs)
except Exception as e:
self.logger.error(
"An exception occured when executing with args: %r kwargs: %r",
args, kwargs)
self.logger.exception(e)
raise
return shared_task(*args, **kwargs)(wrapped_task)
return f
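# Decorator: like logged_task, but also guards the task with a cache-based lock derived from a
# hash of the arguments, so only one instance with the same arguments runs at a time. With
# wait=True the task retries instead of silently skipping while the lock is held.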
def singleton_task(*args, **kwargs):
kwargs["bind"] = True
wait = kwargs.pop('wait', False)
wait_countdown = kwargs.pop('wait_countdown', 10)
wait_retries = kwargs.pop('wait_retries', 30)
def f(task):
@wraps(task)
def wrapped_task(self, *args, **kwargs):
self.logger = create_logger(task)
# Create hash of all args
digest_base = "/".join(str(arg) for arg in args)
keys = sorted(kwargs.keys())
digest_base += "//" + "/".join("{}={}".format(key, kwargs[key]) for key in keys)
digest = hashlib.sha256(digest_base).hexdigest()
lock_id = '{0}-lock-{1}'.format(self.name, digest)
if cache.add(lock_id, 'true', LOCK_EXPIRE):
try:
return task(self, *args, **kwargs)
except Exception as e:
self.logger.error(
"An exception occured when executing with args: %r kwargs: %r",
args, kwargs)
self.logger.exception(e)
raise
finally:
cache.delete(lock_id)
elif wait:
self.logger.info("Waiting for another instance of the task to end.")
self.retry(args=args, countdown=wait_countdown, max_retries=wait_retries)
return shared_task(*args, **kwargs)(wrapped_task)
return f
@singleton_task()
def kill_unused_appliances(self):
"""This is the watchdog, that guards the appliances that were given to users. If you forget
to prolong the lease time, this is the thing that will take the appliance off your hands
and kill it."""
with transaction.atomic():
for appliance in Appliance.objects.filter(marked_for_deletion=False):
if appliance.leased_until is not None and appliance.leased_until <= timezone.now():
self.logger.info("Watchdog found an appliance that is to be deleted: {}/{}".format(
appliance.id, appliance.name))
kill_appliance.delay(appliance.id)
@singleton_task()
def kill_appliance(self, appliance_id, replace_in_pool=False, minutes=60):
"""As-reliable-as-possible appliance deleter. Turns off, deletes the VM and deletes the object.
If the appliance was assigned to a pool and we want to replace it, redo the provisioning.
"""
self.logger.info("Initiated kill of appliance {}".format(appliance_id))
appliance = Appliance.objects.get(id=appliance_id)
workflow = [
disconnect_direct_lun.si(appliance_id),
appliance_power_off.si(appliance_id),
kill_appliance_delete.si(appliance_id),
]
if replace_in_pool:
if appliance.appliance_pool is not None:
workflow.append(
replace_clone_to_pool.si(
appliance.template.version, appliance.template.date,
appliance.appliance_pool.id, minutes, appliance.template.id))
workflow = chain(*workflow)
workflow()
@singleton_task()
def kill_appliance_delete(self, appliance_id, _delete_already_issued=False):
delete_issued = False
try:
appliance = Appliance.objects.get(id=appliance_id)
if appliance.provider_api.does_vm_exist(appliance.name):
appliance.set_status("Deleting the appliance from provider")
# If we haven't issued the delete order, do it now
if not _delete_already_issued:
appliance.provider_api.delete_vm(appliance.name)
delete_issued = True
# In any case, retry to wait for the VM to be deleted, but next time do not issue delete
self.retry(args=(appliance_id, True), countdown=5, max_retries=60)
appliance.delete()
except ObjectDoesNotExist:
# Appliance object already not there
return
except Exception as e:
try:
appliance.set_status("Could not delete appliance. Retrying.")
except UnboundLocalError:
return # The appliance is not there any more
# In case of error retry, and also specify whether the delete order was already issued
self.retry(
args=(appliance_id, _delete_already_issued or delete_issued),
exc=e, countdown=5, max_retries=60)
@singleton_task()
def poke_trackerbot(self):
"""This beat-scheduled task periodically polls the trackerbot if there are any new templates.
"""
template_usability = []
# Extract data from trackerbot
tbapi = trackerbot()
objects = depaginate(tbapi, tbapi.providertemplate().get(limit=TRACKERBOT_PAGINATE))["objects"]
per_group = {}
for obj in objects:
if obj["template"]["group"]["name"] == 'unknown':
continue
if obj["template"]["group"]["name"] not in per_group:
per_group[obj["template"]["group"]["name"]] = []
per_group[obj["template"]["group"]["name"]].append(obj)
# Sort them using the build date
for group in per_group.iterkeys():
per_group[group] = sorted(
per_group[group],
reverse=True, key=lambda o: o["template"]["datestamp"])
objects = []
# And interleave the groups
while any(per_group.values()):
for key in per_group.iterkeys():
if per_group[key]:
objects.append(per_group[key].pop(0))
for template in objects:
if template["provider"]["key"] not in conf.cfme_data.management_systems.keys():
# If we don't use that provider in yamls, set the template as not usable
# 1) It will prevent adding this template if not added
# 2) It'll mark the template as unusable if it already exists
template["usable"] = False
template_usability.append(
(
template["provider"]["key"],
template["template"]["name"],
template["usable"]
)
)
if not template["usable"]:
continue
group, create = Group.objects.get_or_create(id=template["template"]["group"]["name"])
# Check if the template is already obsolete
if group.template_obsolete_days is not None:
build_date = parsetime.from_iso_date(template["template"]["datestamp"])
if build_date <= (parsetime.today() - timedelta(days=group.template_obsolete_days)):
# It is already obsolete, so ignore it
continue
provider, create = Provider.objects.get_or_create(id=template["provider"]["key"])
if not provider.is_working:
continue
if "sprout" not in provider.provider_data:
continue
if not provider.provider_data.get("use_for_sprout", False):
continue
template_name = template["template"]["name"]
ga_released = template['template']['ga_released']
date = parse_template(template_name).datestamp
if not date:
# Not a CFME/MIQ template, ignore it.
continue
# Original one
original_template = None
try:
original_template = Template.objects.get(
provider=provider, template_group=group, original_name=template_name,
name=template_name, preconfigured=False)
if original_template.ga_released != ga_released:
original_template.ga_released = ga_released
original_template.save()
except ObjectDoesNotExist:
if template_name in provider.templates:
date = parse_template(template_name).datestamp
if date is None:
self.logger.warning(
"Ignoring template {} because it does not have a date!".format(
template_name))
continue
template_version = retrieve_cfme_appliance_version(template_name)
if template_version is None:
# Make up a faux version
# First 3 fields of version get parsed as a zstream
# therefore ... makes it a "nil" stream
template_version = "...{}".format(date.strftime("%Y%m%d"))
with transaction.atomic():
tpl = Template(
provider=provider, template_group=group, original_name=template_name,
name=template_name, preconfigured=False, date=date,
version=template_version, ready=True, exists=True, usable=True)
tpl.save()
original_template = tpl
self.logger.info("Created a new template #{}".format(tpl.id))
# If the provider is set to not preconfigure templates, do not bother even doing it.
if provider.num_simultaneous_configuring > 0:
# Preconfigured one
try:
preconfigured_template = Template.objects.get(
provider=provider, template_group=group, original_name=template_name,
preconfigured=True)
if preconfigured_template.ga_released != ga_released:
preconfigured_template.ga_released = ga_released
preconfigured_template.save()
except ObjectDoesNotExist:
if template_name in provider.templates:
original_id = original_template.id if original_template is not None else None
create_appliance_template.delay(
provider.id, group.id, template_name, source_template_id=original_id)
# If any of the templates becomes unusable, let sprout know about it
# Similarly if some of them becomes usable ...
for provider_id, template_name, usability in template_usability:
provider, create = Provider.objects.get_or_create(id=provider_id)
with transaction.atomic():
for template in Template.objects.filter(provider=provider, original_name=template_name):
template.usable = usability
template.save()
# Kill all shepherd appliances if they were accidentally spun up
if not usability:
for appliance in Appliance.objects.filter(
template=template, marked_for_deletion=False,
appliance_pool=None):
Appliance.kill(appliance)
@logged_task()
def create_appliance_template(self, provider_id, group_id, template_name, source_template_id=None):
"""This task creates a template from a fresh CFME template. In case of fatal error during the
operation, the template object is deleted to make sure the operation will be retried next time
when poke_trackerbot runs."""
provider = Provider.objects.get(id=provider_id)
provider.cleanup() # Precaution
group = Group.objects.get(id=group_id)
with transaction.atomic():
# Limit the number of concurrent template configurations
if provider.remaining_configuring_slots == 0:
return False # It will be kicked again when trackerbot gets poked
try:
Template.objects.get(
template_group=group, provider=provider, original_name=template_name,
preconfigured=True)
return False
except ObjectDoesNotExist:
pass
# Fire off the template preparation
date = parse_template(template_name).datestamp
if not date:
return
template_version = retrieve_cfme_appliance_version(template_name)
if template_version is None:
# Make up a faux version
# First 3 fields of version get parsed as a zstream
# therefore ... makes it a "nil" stream
template_version = "...{}".format(date.strftime("%Y%m%d"))
new_template_name = settings.TEMPLATE_FORMAT.format(
group=group.id, date=date.strftime("%y%m%d"), rnd=fauxfactory.gen_alphanumeric(8))
if provider.template_name_length is not None:
allowed_length = provider.template_name_length
# There is some limit
if len(new_template_name) > allowed_length:
# Cut it down
randoms_length = len(new_template_name.rsplit("_", 1)[-1])
minimum_length = (len(new_template_name) - randoms_length) + 1  # At least one random char must be kept
if minimum_length <= allowed_length:
# Just cut it
new_template_name = new_template_name[:allowed_length]
else:
# Another solution
new_template_name = settings.TEMPLATE_FORMAT.format(
group=group.id[:2], date=date.strftime("%y%m%d"), # Use only first 2 of grp
rnd=fauxfactory.gen_alphanumeric(2)) # And just 2 chars random
# TODO: If anything larger comes, do fix that!
if source_template_id is not None:
try:
source_template = Template.objects.get(id=source_template_id)
except ObjectDoesNotExist:
source_template = None
else:
source_template = None
template = Template(
provider=provider, template_group=group, name=new_template_name, date=date,
version=template_version, original_name=template_name, parent_template=source_template)
template.save()
workflow = chain(
prepare_template_deploy.si(template.id),
prepare_template_verify_version.si(template.id),
prepare_template_configure.si(template.id),
prepare_template_seal.si(template.id),
prepare_template_poweroff.si(template.id),
prepare_template_finish.si(template.id),
)
workflow.link_error(prepare_template_delete_on_error.si(template.id))
workflow()
@singleton_task()
def prepare_template_deploy(self, template_id):
template = Template.objects.get(id=template_id)
try:
if not template.exists_in_provider:
template.set_status("Deploying the template.")
provider_data = template.provider.provider_data
kwargs = provider_data["sprout"]
kwargs["power_on"] = True
if "allowed_datastores" not in kwargs and "allowed_datastores" in provider_data:
kwargs["allowed_datastores"] = provider_data["allowed_datastores"]
self.logger.info("Deployment kwargs: {}".format(repr(kwargs)))
template.provider_api.deploy_template(
template.original_name, vm_name=template.name, **kwargs)
else:
template.set_status("Waiting for deployment to be finished.")
template.provider_api.wait_vm_running(template.name)
except Exception as e:
template.set_status(
"Could not properly deploy the template. Retrying. {}: {}".format(
type(e).__name__, str(e)))
self.logger.exception(e)
self.retry(args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Template deployed.")
@singleton_task()
def prepare_template_verify_version(self, template_id):
template = Template.objects.get(id=template_id)
template.set_status("Verifying version.")
appliance = CFMEAppliance(template.provider_name, template.name, container=template.container)
appliance.ipapp.wait_for_ssh()
try:
true_version = appliance.version
except Exception as e:
template.set_status("Some SSH error happened during appliance version check.")
self.retry(args=(template_id,), exc=e, countdown=20, max_retries=5)
supposed_version = Version(template.version)
if true_version is None or true_version.vstring == 'master':
return
if true_version != supposed_version:
# Check if the difference is not just in the suffixes, which can be the case ...
t = str(true_version)
s = str(supposed_version)
if supposed_version.version == true_version.version or t.startswith(s):
# The two have the same version but different suffixes; apply the suffix to the template
# object. This also covers the case when the supposed version is incomplete, in which
# case we use the detected version.
with transaction.atomic():
template.version = t
template.save()
if template.parent_template is not None:
# In case we have a parent template, update the version there too.
if template.version != template.parent_template.version:
pt = template.parent_template
pt.version = template.version
pt.save()
return # no need to continue with spamming process
# SPAM SPAM SPAM!
with transaction.atomic():
mismatch_in_db = MismatchVersionMailer.objects.filter(
provider=template.provider,
template_name=template.original_name,
supposed_version=supposed_version,
actual_version=true_version)
if not mismatch_in_db:
mismatch = MismatchVersionMailer(
provider=template.provider,
template_name=template.original_name,
supposed_version=supposed_version,
actual_version=true_version)
mismatch.save()
# Run the task to mail the problem
mailer_version_mismatch.delay()
raise Exception("Detected version mismatch!")
@singleton_task()
def prepare_template_configure(self, template_id):
template = Template.objects.get(id=template_id)
template.set_status("Customization started.")
appliance = CFMEAppliance(template.provider_name, template.name, container=template.container)
try:
appliance.configure(
setup_fleece=False,
log_callback=lambda s: template.set_status("Customization progress: {}".format(s)))
except Exception as e:
template.set_status("Could not properly configure the CFME. Retrying.")
self.retry(args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Template configuration was done.")
@singleton_task()
def prepare_template_seal(self, template_id):
template = Template.objects.get(id=template_id)
template.set_status("Sealing template.")
try:
template.cfme.ipapp.seal_for_templatizing()
except Exception as e:
template.set_status("Could not seal the template. Retrying.")
self.retry(
args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Template sealed.")
@singleton_task()
def prepare_template_poweroff(self, template_id):
template = Template.objects.get(id=template_id)
template.set_status("Powering off")
try:
template.provider_api.stop_vm(template.name)
template.provider_api.wait_vm_stopped(template.name)
except Exception as e:
template.set_status("Could not power off the appliance. Retrying.")
self.retry(args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Powered off.")
@singleton_task()
def prepare_template_finish(self, template_id):
template = Template.objects.get(id=template_id)
template.set_status("Finishing template creation.")
try:
if template.temporary_name is None:
tmp_name = "templatize_{}".format(fauxfactory.gen_alphanumeric(8))
Template.objects.get(id=template_id).temporary_name = tmp_name # metadata, autosave
else:
tmp_name = template.temporary_name
template.provider_api.mark_as_template(
template.name, temporary_name=tmp_name, delete_on_error=False)
with transaction.atomic():
template = Template.objects.get(id=template_id)
template.ready = True
template.exists = True
template.save()
del template.temporary_name
except Exception as e:
template.set_status("Could not mark the appliance as template. Retrying.")
self.retry(args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Template preparation finished.")
@singleton_task()
def prepare_template_delete_on_error(self, template_id):
try:
template = Template.objects.get(id=template_id)
except ObjectDoesNotExist:
return True
template.set_status("Template creation failed. Deleting it.")
try:
if template.provider_api.does_vm_exist(template.name):
template.provider_api.delete_vm(template.name)
wait_for(template.provider_api.does_vm_exist, [template.name], timeout='5m', delay=10)
if template.provider_api.does_template_exist(template.name):
template.provider_api.delete_template(template.name)
wait_for(
template.provider_api.does_template_exist, [template.name], timeout='5m', delay=10)
if (template.temporary_name is not None and
template.provider_api.does_template_exist(template.temporary_name)):
template.provider_api.delete_template(template.temporary_name)
wait_for(
template.provider_api.does_template_exist,
[template.temporary_name], timeout='5m', delay=10)
template.delete()
except Exception as e:
self.retry(args=(template_id,), exc=e, countdown=60, max_retries=5)
@logged_task()
def request_appliance_pool(self, appliance_pool_id, time_minutes):
"""This task gives maximum possible amount of spinned-up appliances to the specified pool and
then if there is need to spin up another appliances, it spins them up via clone_template_to_pool
task."""
self.logger.info(
"Appliance pool {} requested for {} minutes.".format(appliance_pool_id, time_minutes))
pool = AppliancePool.objects.get(id=appliance_pool_id)
n = Appliance.give_to_pool(pool)
for i in range(pool.total_count - n):
tpls = pool.possible_provisioning_templates
if tpls:
template_id = tpls[0].id
clone_template_to_pool(template_id, pool.id, time_minutes)
else:
with transaction.atomic():
task = DelayedProvisionTask(pool=pool, lease_time=time_minutes)
task.save()
apply_lease_times_after_pool_fulfilled.delay(appliance_pool_id, time_minutes)
@singleton_task()
def apply_lease_times_after_pool_fulfilled(self, appliance_pool_id, time_minutes):
pool = AppliancePool.objects.get(id=appliance_pool_id)
if pool.fulfilled:
for appliance in pool.appliances:
apply_lease_times.delay(appliance.id, time_minutes)
rename_appliances_for_pool.delay(pool.id)
with transaction.atomic():
pool.finished = True
pool.save()
else:
# Look whether we can swap any provisioning appliance with some in shepherd
unfinished = list(Appliance.objects.filter(appliance_pool=pool, ready=False).all())
random.shuffle(unfinished)
if len(unfinished) > 0:
n = Appliance.give_to_pool(pool, len(unfinished))
with transaction.atomic():
for _ in range(n):
appl = unfinished.pop()
appl.appliance_pool = None
appl.save()
try:
self.retry(args=(appliance_pool_id, time_minutes), countdown=30, max_retries=120)
except MaxRetriesExceededError: # Bad luck, pool fulfillment failed. So destroy it.
pool.logger.error("Waiting for fulfillment failed. Initiating the destruction process.")
pool.kill()
@singleton_task()
def process_delayed_provision_tasks(self):
"""This picks up the provisioning tasks that were delayed due to ocncurrency limit of provision.
Goes one task by one and when some of them can be provisioned, it starts the provisioning and
then deletes the task.
"""
for task in DelayedProvisionTask.objects.order_by("id"):
if task.pool.not_needed_anymore:
task.delete()
continue
# Try retrieve from shepherd
appliances_given = Appliance.give_to_pool(task.pool, 1)
if appliances_given == 0:
# No free appliance in shepherd, so do it on our own
tpls = task.pool.possible_provisioning_templates
if task.provider_to_avoid is not None:
filtered_tpls = filter(lambda tpl: tpl.provider != task.provider_to_avoid, tpls)
if filtered_tpls:
# There are other providers to provision on, so try one of them
tpls = filtered_tpls
# If there is no other provider to provision on, we will use the original list.
# This will cause additional rejects until the provider quota is met
if tpls:
clone_template_to_pool(tpls[0].id, task.pool.id, task.lease_time)
task.delete()
else:
# Try freeing up some space in provider
for provider in task.pool.possible_providers:
appliances = provider.free_shepherd_appliances.exclude(
task.pool.appliance_container_q,
**task.pool.appliance_filter_params)
if appliances:
Appliance.kill(random.choice(appliances))
break # Just one
else:
# There was a free appliance in shepherd, so we took it and we don't need this task any more
task.delete()
@logged_task()
def replace_clone_to_pool(
self, version, date, appliance_pool_id, time_minutes, exclude_template_id):
appliance_pool = AppliancePool.objects.get(id=appliance_pool_id)
if appliance_pool.not_needed_anymore:
return
exclude_template = Template.objects.get(id=exclude_template_id)
templates = appliance_pool.possible_templates
templates_excluded = filter(lambda tpl: tpl != exclude_template, templates)
if templates_excluded:
template = random.choice(templates_excluded)
else:
template = exclude_template # :( no other template to use
clone_template_to_pool(template.id, appliance_pool_id, time_minutes)
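# Creates a new Appliance record for the pool from the given template (name prefixed with the
# owner's username) and kicks off the provisioning chain; the pool's version/date are pinned so
# that subsequently provisioned appliances match.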
def clone_template_to_pool(template_id, appliance_pool_id, time_minutes):
template = Template.objects.get(id=template_id)
new_appliance_name = settings.APPLIANCE_FORMAT.format(
group=template.template_group.id,
date=template.date.strftime("%y%m%d"),
rnd=fauxfactory.gen_alphanumeric(8))
with transaction.atomic():
pool = AppliancePool.objects.get(id=appliance_pool_id)
if pool.not_needed_anymore:
return
# Apply also username
new_appliance_name = "{}_{}".format(pool.owner.username, new_appliance_name)
appliance = Appliance(template=template, name=new_appliance_name, appliance_pool=pool)
appliance.save()
# Set pool to these params to keep the appliances with same versions/dates
pool.version = template.version
pool.date = template.date
pool.save()
clone_template_to_appliance.delay(appliance.id, time_minutes, pool.yum_update)
@logged_task()
def apply_lease_times(self, appliance_id, time_minutes):
self.logger.info(
"Applying lease time {} minutes on appliance {}".format(time_minutes, appliance_id))
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.datetime_leased = timezone.now()
appliance.leased_until = appliance.datetime_leased + timedelta(minutes=int(time_minutes))
appliance.save()
@logged_task()
def clone_template(self, template_id):
self.logger.info("Cloning template {}".format(template_id))
template = Template.objects.get(id=template_id)
new_appliance_name = settings.APPLIANCE_FORMAT.format(
group=template.template_group.id,
date=template.date.strftime("%y%m%d"),
rnd=fauxfactory.gen_alphanumeric(8))
appliance = Appliance(template=template, name=new_appliance_name)
appliance.save()
clone_template_to_appliance.delay(appliance.id)
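# Builds the per-appliance provisioning workflow (clone the template, wait for the VM, power it
# on, optionally yum update + reboot, then wait for/mark readiness) and links an error handler
# that kills the appliance if any step fails.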
@singleton_task()
def clone_template_to_appliance(self, appliance_id, lease_time_minutes=None, yum_update=False):
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_status("Beginning deployment process")
tasks = [
clone_template_to_appliance__clone_template.si(appliance_id, lease_time_minutes),
clone_template_to_appliance__wait_present.si(appliance_id),
appliance_power_on.si(appliance_id),
]
if yum_update:
tasks.append(appliance_yum_update.si(appliance_id))
tasks.append(appliance_reboot.si(appliance_id, if_needs_restarting=True))
if appliance.preconfigured:
tasks.append(wait_appliance_ready.si(appliance_id))
else:
tasks.append(mark_appliance_ready.si(appliance_id))
workflow = chain(*tasks)
if Appliance.objects.get(id=appliance_id).appliance_pool is not None:
# Case of the appliance pool
if Appliance.objects.get(id=appliance_id).appliance_pool.not_needed_anymore:
return
# TODO: Make replace_in_pool work again
workflow.link_error(
kill_appliance.si(appliance_id, replace_in_pool=False, minutes=lease_time_minutes))
else:
# Case of shepherd
workflow.link_error(kill_appliance.si(appliance_id))
workflow()
@singleton_task()
def clone_template_to_appliance__clone_template(self, appliance_id, lease_time_minutes):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present, terminating the chain
self.request.callbacks[:] = []
return
if appliance.appliance_pool is not None:
if appliance.appliance_pool.not_needed_anymore:
# Terminate task chain
self.request.callbacks[:] = []
kill_appliance.delay(appliance_id)
return
appliance.provider.cleanup()
try:
if not appliance.provider_api.does_vm_exist(appliance.name):
appliance.set_status("Beginning template clone.")
provider_data = appliance.template.provider.provider_data
kwargs = provider_data["sprout"]
kwargs["power_on"] = False
if "allowed_datastores" not in kwargs and "allowed_datastores" in provider_data:
kwargs["allowed_datastores"] = provider_data["allowed_datastores"]
if appliance.appliance_pool is not None:
if appliance.appliance_pool.override_memory is not None:
kwargs['ram'] = appliance.appliance_pool.override_memory
if appliance.appliance_pool.override_cpu is not None:
kwargs['cpu'] = appliance.appliance_pool.override_cpu
appliance.provider_api.deploy_template(
appliance.template.name, vm_name=appliance.name,
progress_callback=lambda progress: appliance.set_status(
"Deploy progress: {}".format(progress)),
**kwargs)
except Exception as e:
messages = {"limit", "cannot add", "quota"}
if isinstance(e, OSOverLimit):
appliance.set_status("Hit OpenStack provisioning quota, trying putting it aside ...")
elif any(message in str(e).lower() for message in messages):
appliance.set_status("Provider has some troubles, putting it aside ... {}/{}".format(
type(e).__name__, str(e)
))
provider_error_logger().exception(e)
else:
# Something got screwed really bad
appliance.set_status("Error happened: {}({})".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id, lease_time_minutes), exc=e, countdown=60, max_retries=5)
# Ignore that and provision it somewhere else
if appliance.appliance_pool:
# We can put it aside for a while; a DelayedProvisionTask will retry it on another provider
self.request.callbacks[:] = [] # Quit this chain
pool = appliance.appliance_pool
try:
if appliance.provider_api.does_vm_exist(appliance.name):
# Better to check it, you never know when does that fail
appliance.provider_api.delete_vm(appliance.name)
wait_for(
appliance.provider_api.does_vm_exist,
[appliance.name], timeout='5m', delay=10)
except:
pass # Diaper here
appliance.delete(do_not_touch_ap=True)
with transaction.atomic():
new_task = DelayedProvisionTask(
pool=pool, lease_time=lease_time_minutes,
provider_to_avoid=appliance.template.provider)
new_task.save()
return
else:
# We cannot put it aside, so just try that again
self.retry(args=(appliance_id, lease_time_minutes), exc=e, countdown=60, max_retries=5)
else:
appliance.set_status("Template cloning finished. Refreshing provider VMs to get UUID.")
refresh_appliances_provider.delay(appliance.provider.id)
@singleton_task()
def clone_template_to_appliance__wait_present(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present, terminating the chain
self.request.callbacks[:] = []
return
if appliance.appliance_pool is not None:
if appliance.appliance_pool.not_needed_anymore:
# Terminate task chain
self.request.callbacks[:] = []
kill_appliance.delay(appliance_id)
return
try:
appliance.set_status("Waiting for the appliance to become visible in provider.")
if not appliance.provider_api.does_vm_exist(appliance.name):
self.retry(args=(appliance_id,), countdown=20, max_retries=30)
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id,), exc=e, countdown=20, max_retries=30)
else:
appliance.set_status("Template was successfully cloned.")
with diaper:
appliance.synchronize_metadata()
@singleton_task()
def mark_appliance_ready(self, appliance_id):
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.ready = True
appliance.save()
Appliance.objects.get(id=appliance_id).set_status("Appliance was marked as ready")
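# Retry-driven state machine: keeps re-queueing itself until the VM is running and reports an IP
# address, powering it on or waiting for a steady state as needed.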
@singleton_task()
def appliance_power_on(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present
return
try:
if appliance.provider_api.is_vm_running(appliance.name):
try:
current_ip = appliance.provider_api.current_ip_address(appliance.name)
except Exception:
current_ip = None
if current_ip is not None:
# IP present
Appliance.objects.get(id=appliance_id).set_status("Appliance was powered on.")
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.ip_address = current_ip
appliance.set_power_state(Appliance.Power.ON)
appliance.save()
if appliance.containerized:
with appliance.ipapp.ssh_client as ssh:
# Fire up the container
ssh.run_command('cfme-start', ensure_host=True)
# VM is running now.
sync_appliance_hw.delay(appliance.id)
sync_provider_hw.delay(appliance.template.provider.id)
return
else:
# IP not present yet
Appliance.objects.get(id=appliance_id).set_status("Appliance waiting for IP.")
self.retry(args=(appliance_id, ), countdown=20, max_retries=40)
elif not appliance.provider_api.in_steady_state(appliance.name):
appliance.set_status("Waiting for appliance to be steady (current state: {}).".format(
appliance.provider_api.vm_status(appliance.name)))
self.retry(args=(appliance_id, ), countdown=20, max_retries=40)
else:
appliance.set_status("Powering on.")
appliance.provider_api.start_vm(appliance.name)
self.retry(args=(appliance_id, ), countdown=20, max_retries=40)
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id, ), exc=e, countdown=20, max_retries=30)
@singleton_task()
def appliance_reboot(self, appliance_id, if_needs_restarting=False):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present
return
try:
if if_needs_restarting:
with appliance.ssh as ssh:
if int(ssh.run_command("needs-restarting | wc -l").output.strip()) == 0:
return # No reboot needed
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_power_state(Appliance.Power.REBOOTING)
appliance.save()
appliance.ipapp.reboot(wait_for_web_ui=False, log_callback=appliance.set_status)
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_power_state(Appliance.Power.ON)
appliance.save()
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id, ), exc=e, countdown=20, max_retries=30)
@singleton_task()
def appliance_power_off(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present
return
try:
if appliance.provider_api.is_vm_stopped(appliance.name):
Appliance.objects.get(id=appliance_id).set_status("Appliance was powered off.")
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_power_state(Appliance.Power.OFF)
appliance.ready = False
appliance.save()
sync_provider_hw.delay(appliance.template.provider.id)
return
elif appliance.provider_api.is_vm_suspended(appliance.name):
appliance.set_status("Starting appliance from suspended state to properly off it.")
appliance.provider_api.start_vm(appliance.name)
self.retry(args=(appliance_id,), countdown=20, max_retries=40)
elif not appliance.provider_api.in_steady_state(appliance.name):
appliance.set_status("Waiting for appliance to be steady (current state: {}).".format(
appliance.provider_api.vm_status(appliance.name)))
self.retry(args=(appliance_id,), countdown=20, max_retries=40)
else:
appliance.set_status("Powering off.")
appliance.provider_api.stop_vm(appliance.name)
self.retry(args=(appliance_id,), countdown=20, max_retries=40)
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id,), exc=e, countdown=20, max_retries=40)
@singleton_task()
def appliance_suspend(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present
return
try:
if appliance.provider_api.is_vm_suspended(appliance.name):
Appliance.objects.get(id=appliance_id).set_status("Appliance was suspended.")
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_power_state(Appliance.Power.SUSPENDED)
appliance.ready = False
appliance.save()
sync_provider_hw.delay(appliance.template.provider.id)
return
elif not appliance.provider_api.in_steady_state(appliance.name):
appliance.set_status("Waiting for appliance to be steady (current state: {}).".format(
appliance.provider_api.vm_status(appliance.name)))
self.retry(args=(appliance_id,), countdown=20, max_retries=30)
else:
appliance.set_status("Suspending.")
appliance.provider_api.suspend_vm(appliance.name)
self.retry(args=(appliance_id,), countdown=20, max_retries=30)
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id,), exc=e, countdown=20, max_retries=30)
@singleton_task()
def retrieve_appliance_ip(self, appliance_id):
"""Updates appliance's IP address."""
try:
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_status("Retrieving IP address.")
ip_address = appliance.provider_api.current_ip_address(appliance.name)
if ip_address is None:
self.retry(args=(appliance_id,), countdown=30, max_retries=20)
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.ip_address = ip_address
appliance.save()
except ObjectDoesNotExist:
# source object is not present, terminating
return
else:
appliance.set_status("IP address retrieved.")
@singleton_task()
def refresh_appliances(self):
"""Dispatches the appliance refresh process among the providers"""
self.logger.info("Initiating regular appliance provider refresh")
for provider in Provider.objects.filter(working=True, disabled=False):
refresh_appliances_provider.delay(provider.id)
@singleton_task(soft_time_limit=180)
def refresh_appliances_provider(self, provider_id):
"""Downloads the list of VMs from the provider, then matches them by name or UUID with
appliances stored in database.
"""
self.logger.info("Refreshing appliances in {}".format(provider_id))
provider = Provider.objects.get(id=provider_id)
if not hasattr(provider.api, "all_vms"):
# Ignore this provider
return
vms = provider.api.all_vms()
dict_vms = {}
uuid_vms = {}
for vm in vms:
dict_vms[vm.name] = vm
if vm.uuid:
uuid_vms[vm.uuid] = vm
for appliance in Appliance.objects.filter(template__provider=provider):
if appliance.uuid is not None and appliance.uuid in uuid_vms:
vm = uuid_vms[appliance.uuid]
# Matched by UUID; update the name if it changed
appliance.name = vm.name
appliance.ip_address = vm.ip
appliance.set_power_state(Appliance.POWER_STATES_MAPPING.get(
vm.power_state, Appliance.Power.UNKNOWN))
appliance.save()
elif appliance.name in dict_vms:
vm = dict_vms[appliance.name]
# Matched by name; retrieve and store the UUID
appliance.uuid = vm.uuid
appliance.ip_address = vm.ip
appliance.set_power_state(Appliance.POWER_STATES_MAPPING.get(
vm.power_state, Appliance.Power.UNKNOWN))
appliance.save()
self.logger.info("Retrieved UUID for appliance {}/{}: {}".format(
appliance.id, appliance.name, appliance.uuid))
else:
# Orphaned :(
appliance.set_power_state(Appliance.Power.ORPHANED)
appliance.save()
@singleton_task()
def check_templates(self):
self.logger.info("Initiated a periodic template check")
for provider in Provider.objects.all():
check_templates_in_provider.delay(provider.id)
@singleton_task(soft_time_limit=180)
def check_templates_in_provider(self, provider_id):
self.logger.info("Initiated a periodic template check for {}".format(provider_id))
provider = Provider.objects.get(id=provider_id)
# Get templates and update metadata
try:
templates = map(str, provider.api.list_template())
except:
provider.working = False
provider.save()
else:
provider.working = True
provider.save()
with provider.edit_metadata as metadata:
metadata["templates"] = templates
if not provider.working:
return
# Check Sprout template existence
# expiration_time = (timezone.now() - timedelta(**settings.BROKEN_APPLIANCE_GRACE_TIME))
for template in Template.objects.filter(provider=provider):
with transaction.atomic():
tpl = Template.objects.get(pk=template.pk)
exists = tpl.name in templates
tpl.exists = exists
tpl.save()
# if not exists:
# if len(Appliance.objects.filter(template=template).all()) == 0\
# and template.status_changed < expiration_time:
# # No other appliance is made from this template so no need to keep it
# with transaction.atomic():
# tpl = Template.objects.get(pk=template.pk)
# tpl.delete()
@singleton_task()
def delete_nonexistent_appliances(self):
"""Goes through orphaned appliances' objects and deletes them from the database."""
expiration_time = (timezone.now() - timedelta(**settings.ORPHANED_APPLIANCE_GRACE_TIME))
for appliance in Appliance.objects.filter(ready=True).all():
if appliance.name in redis.renaming_appliances:
continue
if appliance.power_state == Appliance.Power.ORPHANED:
if appliance.power_state_changed > expiration_time:
# Ignore it for now
continue
self.logger.info(
"I will delete orphaned appliance {}/{}".format(appliance.id, appliance.name))
try:
appliance.delete()
except ObjectDoesNotExist as e:
if "AppliancePool" in str(e):
# Someone managed to delete the appliance pool before
appliance.appliance_pool = None
appliance.save()
appliance.delete()
else:
raise # No diaper pattern here!
# If something happened to the appliance provisioning process, just delete it to remove
# the garbage. It will be spun up again by the shepherd.
# Grace time is specified in BROKEN_APPLIANCE_GRACE_TIME
expiration_time = (timezone.now() - timedelta(**settings.BROKEN_APPLIANCE_GRACE_TIME))
for appliance in Appliance.objects.filter(ready=False, marked_for_deletion=False).all():
if appliance.status_changed < expiration_time:
self.logger.info("Killing broken appliance {}/{}".format(appliance.id, appliance.name))
Appliance.kill(appliance) # Use kill because the appliance may still exist
# And now - if something happened during appliance deletion, call kill again
for appliance in Appliance.objects.filter(
marked_for_deletion=True, status_changed__lt=expiration_time).all():
with transaction.atomic():
appl = Appliance.objects.get(pk=appliance.pk)
appl.marked_for_deletion = False
appl.save()
self.logger.info(
"Trying to kill unkilled appliance {}/{}".format(appliance.id, appliance.name))
Appliance.kill(appl)
def generic_shepherd(self, preconfigured):
"""This task takes care of having the required templates spinned into required number of
appliances. For each template group, it keeps the last template's appliances spinned up in
required quantity. If new template comes out of the door, it automatically kills the older
running template's appliances and spins up new ones. Sorts the groups by the fulfillment."""
for gs in sorted(
GroupShepherd.objects.all(), key=lambda g: g.get_fulfillment_percentage(preconfigured)):
prov_filter = {'provider__user_groups': gs.user_group}
group_versions = Template.get_versions(
template_group=gs.template_group, ready=True, usable=True, preconfigured=preconfigured,
container=None, **prov_filter)
group_dates = Template.get_dates(
template_group=gs.template_group, ready=True, usable=True, preconfigured=preconfigured,
container=None, **prov_filter)
if group_versions:
# Downstream - by version (downstream releases)
version = group_versions[0]
# Find the latest date (one version can have new build)
dates = Template.get_dates(
template_group=gs.template_group, ready=True, usable=True,
version=group_versions[0], preconfigured=preconfigured, container=None,
**prov_filter)
if not dates:
# No template yet?
continue
date = dates[0]
filter_keep = {"version": version, "date": date, 'container': None}
filters_kill = []
for kill_date in dates[1:]:
filters_kill.append({"version": version, "date": kill_date})
for kill_version in group_versions[1:]:
filters_kill.append({"version": kill_version})
elif group_dates:
# Upstream - by date (upstream nightlies)
filter_keep = {"date": group_dates[0], 'container': None}
filters_kill = [{"date": v} for v in group_dates[1:]]
else:
continue # Ignore this group, no templates detected yet
filter_keep.update(prov_filter)
for filt in filters_kill:
filt.update(prov_filter)
# Keeping current appliances
# Retrieve list of all templates for given group
# I know joins might be a bit better solution but I'll leave that for later.
possible_templates = list(
Template.objects.filter(
usable=True, ready=True, template_group=gs.template_group,
preconfigured=preconfigured, **filter_keep).all())
# If it can be deployed, it must exist
possible_templates_for_provision = filter(lambda tpl: tpl.exists, possible_templates)
appliances = []
for template in possible_templates:
appliances.extend(
Appliance.objects.filter(
template=template, appliance_pool=None, marked_for_deletion=False))
# If we then want to delete some templates, better kill the eldest. status_changed
# says which one was provisioned when, because nothing else touches that field afterwards.
appliances.sort(key=lambda appliance: appliance.status_changed)
pool_size = gs.template_pool_size if preconfigured else gs.unconfigured_template_pool_size
if len(appliances) < pool_size and possible_templates_for_provision:
# There must be some templates in order to run the provisioning
# Provision ONE appliance at a time for each group; that way it is possible to maintain
# reasonable balancing
new_appliance_name = settings.APPLIANCE_FORMAT.format(
group=template.template_group.id,
date=template.date.strftime("%y%m%d"),
rnd=fauxfactory.gen_alphanumeric(8))
with transaction.atomic():
# Now look for templates that are on non-busy providers
tpl_free = filter(
lambda t: t.provider.free,
possible_templates_for_provision)
if tpl_free:
appliance = Appliance(
template=sorted(tpl_free, key=lambda t: t.provider.appliance_load)[0],
name=new_appliance_name)
appliance.save()
if tpl_free:
self.logger.info(
"Adding an appliance to shepherd: {}/{}".format(appliance.id, appliance.name))
clone_template_to_appliance.delay(appliance.id, None)
elif len(appliances) > pool_size:
# Too many appliances, kill the surplus
# Only kill those that are visible only for one group. This is necessary so the groups
# don't "fight"
for appliance in appliances[:len(appliances) - pool_size]:
if appliance.is_visible_only_in_group(gs.user_group):
self.logger.info("Killing an extra appliance {}/{} in shepherd".format(
appliance.id, appliance.name))
Appliance.kill(appliance)
# Killing old appliances
for filter_kill in filters_kill:
for template in Template.objects.filter(
ready=True, usable=True, template_group=gs.template_group,
preconfigured=preconfigured, container=None, **filter_kill):
for a in Appliance.objects.filter(
template=template, appliance_pool=None, marked_for_deletion=False):
self.logger.info(
"Killing appliance {}/{} in shepherd because it is obsolete now".format(
a.id, a.name))
Appliance.kill(a)
@singleton_task()
def free_appliance_shepherd(self):
generic_shepherd(self, True)
generic_shepherd(self, False)
@singleton_task()
def wait_appliance_ready(self, appliance_id):
"""This task checks for appliance's readiness for use. The checking loop is designed as retrying
the task to free up the queue."""
try:
appliance = Appliance.objects.get(id=appliance_id)
if appliance.appliance_pool is not None:
if appliance.appliance_pool.not_needed_anymore:
# Terminate task chain
self.request.callbacks[:] = []
kill_appliance.delay(appliance_id)
return
if appliance.power_state == Appliance.Power.UNKNOWN or appliance.ip_address is None:
self.retry(args=(appliance_id,), countdown=30, max_retries=45)
if Appliance.objects.get(id=appliance_id).cfme.ipapp.is_web_ui_running():
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.ready = True
appliance.save()
appliance.set_status("The appliance is ready.")
with diaper:
appliance.synchronize_metadata()
else:
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.ready = False
appliance.save()
appliance.set_status("Waiting for UI to appear.")
self.retry(args=(appliance_id,), countdown=30, max_retries=45)
except ObjectDoesNotExist:
# source object is not present, terminating
return
@singleton_task()
def anyvm_power_on(self, provider, vm):
provider = get_mgmt(provider)
provider.start_vm(vm)
@singleton_task()
def anyvm_power_off(self, provider, vm):
provider = get_mgmt(provider)
provider.stop_vm(vm)
@singleton_task()
def anyvm_suspend(self, provider, vm):
provider = get_mgmt(provider)
provider.suspend_vm(vm)
@singleton_task()
def anyvm_delete(self, provider, vm):
provider = get_mgmt(provider)
provider.delete_vm(vm)
@singleton_task()
def delete_template_from_provider(self, template_id):
template = Template.objects.get(id=template_id)
try:
template.provider_api.delete_template(template.name)
except Exception as e:
self.logger.exception(e)
return False
with transaction.atomic():
template = Template.objects.get(pk=template.pk)
template.exists = False
template.save()
return True
@singleton_task()
def appliance_rename(self, appliance_id, new_name):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
return None
if not appliance.provider.allow_renaming:
return None
if appliance.name == new_name:
return None
with redis.appliances_ignored_when_renaming(appliance.name, new_name):
self.logger.info("Renaming {}/{} to {}".format(appliance_id, appliance.name, new_name))
appliance.name = appliance.provider_api.rename_vm(appliance.name, new_name)
appliance.save()
return appliance.name
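# Renames pool appliances to a readable scheme: <owner>_cfme_<version digits>_<date>_<random>
# for downstream builds, or <owner>_miq_<date>_<random> for upstream ones.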
@singleton_task()
def rename_appliances_for_pool(self, pool_id):
with transaction.atomic():
try:
appliance_pool = AppliancePool.objects.get(id=pool_id)
except ObjectDoesNotExist:
return
appliances = [
appliance
for appliance
in appliance_pool.appliances
if appliance.provider_api.can_rename
]
for appliance in appliances:
if not appliance.provider.allow_renaming:
continue
new_name = '{}_'.format(appliance_pool.owner.username)
if appliance.version and not appliance.version.startswith('...'):
# CFME
new_name += 'cfme_{}_'.format(appliance.version.replace('.', ''))
else:
# MIQ
new_name += 'miq_'
new_name += '{}_{}'.format(
appliance.template.date.strftime("%y%m%d"),
fauxfactory.gen_alphanumeric(length=4))
appliance_rename.apply_async(
countdown=10, # To prevent clogging with the transaction.atomic
args=(appliance.id, new_name))
@singleton_task(soft_time_limit=60)
def check_update(self):
sprout_sh = project_path.join("sprout").join("sprout.sh")
try:
result = command.run([sprout_sh.strpath, "check-update"])
except command.CommandException as e:
result = e
needs_update = result.output.strip().lower() != "up-to-date"
redis.set("sprout-needs-update", needs_update)
@singleton_task()
def scavenge_managed_providers(self):
chord_tasks = []
for appliance in Appliance.objects.exclude(appliance_pool=None):
chord_tasks.append(scavenge_managed_providers_from_appliance.si(appliance.id))
chord(chord_tasks)(calculate_provider_management_usage.s())
@singleton_task(soft_time_limit=180)
def scavenge_managed_providers_from_appliance(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
return None
try:
managed_providers = appliance.ipapp.managed_known_providers
appliance.managed_providers = [prov.key for prov in managed_providers]
except Exception as e:
# To prevent single appliance messing up whole result
provider_error_logger().error("{}: {}".format(type(e).__name__, str(e)))
return None
return appliance.id
@singleton_task()
def calculate_provider_management_usage(self, appliance_ids):
results = {}
for appliance_id in filter(lambda id: id is not None, appliance_ids):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# Deleted in meanwhile
continue
for provider_key in appliance.managed_providers:
if provider_key not in results:
results[provider_key] = []
results[provider_key].append(appliance.id)
for provider in Provider.objects.all():
provider.appliances_manage_this_provider = results.get(provider.id, [])
@singleton_task()
def mailer_version_mismatch(self):
"""This is usually called per-mismatch, but as the mismatches are stored in database and the
mail can fail sending, so this can send the mismatches in a batch in this case."""
with transaction.atomic():
mismatches = MismatchVersionMailer.objects.filter(sent=False)
if not mismatches:
return
email_body = """\
Hello,
I am Sprout template version mismatch spammer. I think there are some version mismatches.
Here is the list:
{}
Sincerely,
Sprout template version mismatch spammer™
""".format(
"\n".join(
"* {} @ {} : supposed {} , true {}".format(
mismatch.template_name, mismatch.provider.id, mismatch.supposed_version,
mismatch.actual_version)
for mismatch in mismatches
)
)
user_mails = []
for user in User.objects.filter(is_superuser=True):
if user.email:
user_mails.append(user.email)
result = send_mail(
"Template version mismatches detected",
email_body,
"sprout-template-version-mismatch@example.com",
user_mails,
)
if result > 0:
for mismatch in mismatches:
mismatch.sent = True
mismatch.save()
@singleton_task()
def obsolete_template_deleter(self):
for group in Group.objects.all():
if group.template_obsolete_days_delete:
# We can delete based on the template age
obsolete_templates = group.obsolete_templates
if obsolete_templates is not None:
for template in obsolete_templates:
if template.can_be_deleted:
delete_template_from_provider.delay(template.id)
@singleton_task()
def connect_direct_lun(self, appliance_id):
appliance = Appliance.objects.get(id=appliance_id)
if not hasattr(appliance.provider_api, "connect_direct_lun_to_appliance"):
return False
try:
appliance.provider_api.connect_direct_lun_to_appliance(appliance.name, False)
except Exception as e:
appliance.set_status("LUN: {}: {}".format(type(e).__name__, str(e)))
return False
else:
appliance.reload()
with transaction.atomic():
appliance.lun_disk_connected = True
appliance.save()
return True
@singleton_task()
def disconnect_direct_lun(self, appliance_id):
appliance = Appliance.objects.get(id=appliance_id)
if not appliance.lun_disk_connected:
return False
if not hasattr(appliance.provider_api, "connect_direct_lun_to_appliance"):
return False
try:
appliance.provider_api.connect_direct_lun_to_appliance(appliance.name, True)
except Exception as e:
appliance.set_status("LUN: {}: {}".format(type(e).__name__, str(e)))
return False
else:
appliance.reload()
with transaction.atomic():
appliance.lun_disk_connected = False
appliance.save()
return True
@singleton_task()
def appliance_yum_update(self, appliance_id):
appliance = Appliance.objects.get(id=appliance_id)
appliance.ipapp.update_rhel(reboot=False)
@singleton_task()
def pick_templates_for_deletion(self):
"""Applies some heuristics to guess templates that might be candidates to deletion."""
to_mail = {}
for group in Group.objects.all():
for zstream, versions in group.pick_versions_to_delete().iteritems():
for version in versions:
for template in Template.objects.filter(
template_group=group, version=version, exists=True, suggested_delete=False):
template.suggested_delete = True
template.save()
if group.id not in to_mail:
to_mail[group.id] = {}
if zstream not in to_mail[group.id]:
to_mail[group.id][zstream] = {}
if version not in to_mail[group.id][zstream]:
to_mail[group.id][zstream][version] = []
to_mail[group.id][zstream][version].append(
"{} @ {}".format(template.name, template.provider.id))
# TODO: Figure out why it was spamming
if to_mail and False:
data = yaml.safe_dump(to_mail, default_flow_style=False)
email_body = """\
Hello,
just letting you know that there are some templates that you might like to delete:
{}
Visit Sprout's Templates page for more information.
Sincerely,
Sprout.
""".format(data)
user_mails = []
for user in User.objects.filter(is_superuser=True):
if user.email:
user_mails.append(user.email)
send_mail(
"Possible candidates for template deletion",
email_body,
"sprout-template-deletion-suggest@example.com",
user_mails,
)
@singleton_task()
def check_swap_in_appliances(self):
chord_tasks = []
for appliance in Appliance.objects.filter(
ready=True, power_state=Appliance.Power.ON, marked_for_deletion=False).exclude(
power_state=Appliance.Power.ORPHANED):
chord_tasks.append(check_swap_in_appliance.si(appliance.id))
chord(chord_tasks)(notify_owners.s())
@singleton_task()
def check_swap_in_appliance(self, appliance_id):
appliance = Appliance.objects.get(id=appliance_id)
try:
swap_amount = appliance.ipapp.swap
except (SSHException, socket.error, Exception) as e:
if type(e) is Exception and 'SSH is unavailable' not in str(e):
# Because otherwise it might not be an SSH error
raise
ssh_failed = True
swap_amount = None
else:
ssh_failed = False
went_up = (
(appliance.swap is not None and swap_amount > appliance.swap) or
(appliance.swap is None and swap_amount is not None and swap_amount > 0))
ssh_failed_changed = ssh_failed and not appliance.ssh_failed
appliance.swap = swap_amount
appliance.ssh_failed = ssh_failed
appliance.save()
# Returns a tuple - (appliance_id, went_up?, current_amount, ssh_failed?)
return appliance.id, went_up, swap_amount, ssh_failed_changed
@singleton_task()
def notify_owners(self, results):
# Filter out any errors
results = [x for x in results if isinstance(x, (list, tuple)) and len(x) == 4]
per_user = {}
for appliance_id, went_up, current_swap, ssh_failed_changed in results:
if not went_up and not ssh_failed_changed:
# Not interested
continue
appliance = Appliance.objects.get(id=appliance_id)
if appliance.appliance_pool is not None:
username = appliance.appliance_pool.owner.username
user = appliance.appliance_pool.owner
else:
username = 'SHEPHERD'
user = None
issues = []
if went_up:
issues.append('swap++ {}M'.format(current_swap))
if ssh_failed_changed:
issues.append('ssh unreachable')
message = '{}/{} {}'.format(
appliance.name, appliance.ip_address, ', '.join(issues))
if user is None:
# No email
continue
if not user.email:
# Same here
continue
        # We assume that "living" users have an e-mail set; therefore we will not nag about bots'
# appliances.
send_message('{}: {}'.format(username, message))
# Add the message to be sent
if user not in per_user:
per_user[user] = []
per_user[user].append(message)
# Send out the e-mails
for user, messages in per_user.iteritems():
appliance_list = '\n'.join('* {}'.format(message) for message in messages)
email_body = """\
Hello,
I discovered that some of your appliances are behaving badly. Please check them out:
{}
Best regards,
The Sprout™
""".format(appliance_list)
send_mail(
"[Sprout] Appliance swap report",
email_body,
"sprout-appliance-swap@example.com",
[user.email],
)
@singleton_task()
def appliances_synchronize_metadata(self):
for appliance in Appliance.objects.all():
try:
appliance.synchronize_metadata()
except ObjectDoesNotExist:
return
@singleton_task()
def synchronize_untracked_vms(self):
for provider in Provider.objects.filter(working=True, disabled=False):
synchronize_untracked_vms_in_provider.delay(provider.id)
def parsedate(d):
if d is None:
return d
else:
return iso8601.parse_date(d)
@singleton_task()
def synchronize_untracked_vms_in_provider(self, provider_id):
    """Re-synchronizes any VMs that might have been lost track of during outages."""
provider = Provider.objects.get(id=provider_id)
provider_api = provider.api
if not hasattr(provider_api, 'list_vm'):
        # This provider does not have VMs (e.g. Hawkular or Openshift)
return
for vm_name in sorted(map(str, provider_api.list_vm())):
if Appliance.objects.filter(name=vm_name, template__provider=provider).count() != 0:
continue
# We have an untracked VM. Let's investigate
try:
appliance_id = provider_api.get_meta_value(vm_name, 'sprout_id')
except KeyError:
continue
except NotImplementedError:
# Do not bother if not implemented in the API
return
# just check it again ...
if Appliance.objects.filter(id=appliance_id).count() == 1:
# For some reason it is already in
continue
# Now it appears that this is a VM that was in Sprout
construct = {'id': appliance_id}
# Retrieve appliance data
try:
self.logger.info('Trying to reconstruct appliance %d/%s', appliance_id, vm_name)
construct['name'] = vm_name
template_id = provider_api.get_meta_value(vm_name, 'sprout_source_template_id')
# Templates are not deleted from the DB so this should be OK.
construct['template'] = Template.objects.get(id=template_id)
construct['name'] = vm_name
construct['ready'] = provider_api.get_meta_value(vm_name, 'sprout_ready')
construct['description'] = provider_api.get_meta_value(vm_name, 'sprout_description')
construct['lun_disk_connected'] = provider_api.get_meta_value(
vm_name, 'sprout_lun_disk_connected')
construct['swap'] = provider_api.get_meta_value(vm_name, 'sprout_swap')
construct['ssh_failed'] = provider_api.get_meta_value(vm_name, 'sprout_ssh_failed')
# Time fields
construct['datetime_leased'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_datetime_leased'))
construct['leased_until'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_leased_until'))
construct['status_changed'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_status_changed'))
construct['created_on'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_created_on'))
construct['modified_on'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_modified_on'))
except KeyError as e:
self.logger.error('Failed to reconstruct %d/%s', appliance_id, vm_name)
self.logger.exception(e)
continue
# Retrieve pool data if applicable
try:
pool_id = provider_api.get_meta_value(vm_name, 'sprout_pool_id')
pool_construct = {'id': pool_id}
pool_construct['total_count'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_total_count')
group_id = provider_api.get_meta_value(
vm_name, 'sprout_pool_group')
pool_construct['group'] = Group.objects.get(id=group_id)
try:
construct_provider_id = provider_api.get_meta_value(
vm_name, 'sprout_pool_provider')
pool_construct['provider'] = Provider.objects.get(id=construct_provider_id)
except (KeyError, ObjectDoesNotExist):
# optional
pool_construct['provider'] = None
pool_construct['version'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_version')
pool_construct['date'] = parsedate(provider_api.get_meta_value(
vm_name, 'sprout_pool_appliance_date'))
owner_id = provider_api.get_meta_value(
vm_name, 'sprout_pool_owner_id')
try:
owner = User.objects.get(id=owner_id)
except ObjectDoesNotExist:
owner_username = provider_api.get_meta_value(
vm_name, 'sprout_pool_owner_username')
owner = User(id=owner_id, username=owner_username)
owner.save()
pool_construct['owner'] = owner
pool_construct['preconfigured'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_preconfigured')
pool_construct['description'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_description')
pool_construct['not_needed_anymore'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_not_needed_anymore')
pool_construct['finished'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_finished')
pool_construct['yum_update'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_yum_update')
try:
construct['appliance_pool'] = AppliancePool.objects.get(id=pool_id)
except ObjectDoesNotExist:
pool = AppliancePool(**pool_construct)
pool.save()
construct['appliance_pool'] = pool
except KeyError as e:
pass
appliance = Appliance(**construct)
appliance.save()
# And now, refresh!
refresh_appliances_provider.delay(provider.id)
@singleton_task()
def read_docker_images_from_url(self):
for group in Group.objects.exclude(Q(templates_url=None) | Q(templates_url='')):
read_docker_images_from_url_group.delay(group.id)
@singleton_task()
def read_docker_images_from_url_group(self, group_id):
group = Group.objects.get(id=group_id)
with closing(urlopen(group.templates_url)) as http:
root = etree.parse(http, parser=etree.HTMLParser()).getroot()
result = set()
for link in root.xpath('//a[../../td/img[contains(@src, "folder")]]'):
try:
href = link.attrib['href']
except KeyError:
continue
url = group.templates_url + href
version_with_suffix = href.rstrip('/') # Does not contain the last digit
try:
with closing(urlopen(url + 'cfme-docker')) as http:
cfme_docker = http.read().strip()
except HTTPError:
self.logger.info('Skipping {} (no docker)'.format(url))
continue
try:
with closing(urlopen(url + 'version')) as http:
cfme_version = http.read().strip()
if '-' in version_with_suffix:
# Use the suffix from the folder name
suffix = version_with_suffix.rsplit('-', 1)[-1]
cfme_version = '{}-{}'.format(cfme_version, suffix)
except HTTPError:
self.logger.info('Skipping {} (no version)'.format(url))
continue
cfme_docker = re.split(r'\s+', cfme_docker)
if len(cfme_docker) == 2:
pull_url, latest_mapping = cfme_docker
latest = re.sub(r'^\(latest=([^)]+)\)$', '\\1', latest_mapping)
proper_pull_url = re.sub(r':latest$', ':{}'.format(latest), pull_url)
elif cfme_docker and cfme_docker[0].lower().strip() == 'tags:':
# Multiple tags, take the longest
proper_pull_url = sorted(filter(None, cfme_docker[1:]), key=len, reverse=True)[0]
latest = proper_pull_url.rsplit(':', 1)[-1]
else:
self.logger.info('Skipping: unknown format: {!r}'.format(cfme_docker))
continue
if cfme_version in result:
continue
process_docker_images_from_url_group.delay(group.id, cfme_version, latest, proper_pull_url)
result.add(cfme_version)
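# Hedged sketch (not in the original module): the two `cfme-docker` layouts the loop
# above understands, restated as a standalone helper. The sample strings in the
# comments are illustrative, not real build output.
def _example_parse_cfme_docker(text):
    tokens = re.split(r'\s+', text.strip())
    if len(tokens) == 2:
        # e.g. "registry.example.com/cfme:latest (latest=5.7.0.1-20160624221308)"
        pull_url, latest_mapping = tokens
        latest = re.sub(r'^\(latest=([^)]+)\)$', '\\1', latest_mapping)
        return re.sub(r':latest$', ':{}'.format(latest), pull_url), latest
    elif tokens and tokens[0].lower() == 'tags:':
        # e.g. "Tags: cfme:5.7 cfme:5.7.0.1-20160624221308" -- the longest tag wins
        pull_url = sorted(filter(None, tokens[1:]), key=len, reverse=True)[0]
        return pull_url, pull_url.rsplit(':', 1)[-1]
    return None, None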
@singleton_task()
def process_docker_images_from_url_group(self, group_id, version, docker_version, pull_url):
group = Group.objects.get(id=group_id)
# "-20160624221308"
date = docker_version.rsplit('-', 1)[-1]
try:
date = datetime.strptime(date, '%Y%m%d%H%M%S').date() # noqa
    except (AttributeError, ValueError):  # strptime raises ValueError on a malformed date
raise ValueError('Could not parse date from {}'.format(docker_version))
for provider in Provider.objects.exclude(container_base_template=None):
try:
Template.objects.get(
~Q(container=None), template_group=group, provider=provider, version=version,
date=date, preconfigured=True)
except ObjectDoesNotExist:
create_docker_vm.delay(group.id, provider.id, version, date, pull_url)
def docker_vm_name(version, date):
return 'docker-{}-{}-{}'.format(
re.sub(r'[^0-9a-z]', '', version.lower()),
re.sub(r'[^0-9]', '', str(date)),
fauxfactory.gen_alphanumeric(length=4).lower())
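# Illustrative only: for version '5.7.0.1' and a date of 2016-06-24, docker_vm_name()
# returns something like 'docker-5701-20160624-xk2f'; the four trailing characters are random.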
@singleton_task()
def create_docker_vm(self, group_id, provider_id, version, date, pull_url):
group = Group.objects.get(id=group_id)
provider = Provider.objects.get(id=provider_id)
with transaction.atomic():
if provider.remaining_configuring_slots < 1:
self.retry(
args=(group_id, provider_id, version, date, pull_url), countdown=60, max_retries=60)
new_name = docker_vm_name(version, date)
new_template = Template(
template_group=group, provider=provider,
container='cfme', name=new_name, original_name=provider.container_base_template,
version=version, date=date,
ready=False, exists=False, usable=True, preconfigured=True)
new_template.save()
workflow = chain(
prepare_template_deploy.si(new_template.id),
configure_docker_template.si(new_template.id, pull_url),
prepare_template_seal.si(new_template.id),
prepare_template_poweroff.si(new_template.id),
prepare_template_finish.si(new_template.id),
)
workflow.link_error(prepare_template_delete_on_error.si(new_template.id))
workflow()
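# Note on the workflow above: the five prepare_template_* steps run strictly in
# sequence (a celery chain), and if any of them fails the link_error handler
# (prepare_template_delete_on_error) is called to clean up the partially built template.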
@singleton_task()
def configure_docker_template(self, template_id, pull_url):
template = Template.objects.get(id=template_id)
template.set_status("Waiting for SSH.")
appliance = CFMEAppliance(template.provider_name, template.name, container=template.container)
appliance.ipapp.wait_for_ssh()
with appliance.ipapp.ssh_client as ssh:
template.set_status("Setting the pull URL.")
ssh.run_command(
'echo "export CFME_URL={}" > /etc/cfme_pull_url'.format(pull_url), ensure_host=True)
template.set_status("Pulling the {}.".format(pull_url))
ssh.run_command('docker pull {}'.format(pull_url), ensure_host=True)
template.set_status('Pulling finished.')
@singleton_task()
def sync_appliance_hw(self, appliance_id):
Appliance.objects.get(id=appliance_id).sync_hw()
@singleton_task()
def sync_provider_hw(self, provider_id):
Provider.objects.get(id=provider_id).perf_sync()
@singleton_task()
def sync_quotas_perf(self):
for provider in Provider.objects.all():
sync_provider_hw.delay(provider.id)
for appliance in provider.currently_managed_appliances:
sync_appliance_hw.delay(appliance.id)
|
gpl-2.0
| 6,819,446,563,095,956,000
| 41.771268
| 100
| 0.617997
| false
| 4.081474
| true
| false
| false
|
jeff-99/toolbox
|
toolbox/config.py
|
1
|
5198
|
import json
import collections
from .mixins import ConfigMixin
from .defaults import *
class ConfigManager(object):
"""
The config manager has the responsibility of persisting plugin configs.
On initialisation it creates a default file structure in the user's home directory
"""
FILE_EXT = '.json'
def __init__(self):
if not os.path.isdir(TOOLBOX_DIR):
os.mkdir(TOOLBOX_DIR)
if not os.path.isdir(CONF_DIR):
os.mkdir(CONF_DIR)
if not os.path.isdir(LOCAL_PLUGIN_DIR):
os.mkdir(LOCAL_PLUGIN_DIR)
self.config_dir = CONF_DIR
def load_plugin(self, name):
"""
        Load the plugin config file by name and return a py:class:`toolbox.config.PluginConfig`
        :param str name:
        :return: an instance of PluginConfig for the given plugin name
:rtype: toolbox.config.PluginConfig
"""
file_name = name + ConfigManager.FILE_EXT
path = os.path.join(self.config_dir, file_name)
if not os.path.exists(path):
plugin_config = PluginConfig()
elif os.path.exists(path) and not os.path.isfile(path):
raise TypeError('{} is not a file'.format(path))
else:
with open(path, 'r') as f:
try:
config = json.load(f)
plugin_config = PluginConfig.create_from_dict(config)
except ValueError:
plugin_config = PluginConfig()
return plugin_config
def save_plugin(self, name, config):
"""
        Save a plugin config by name.
        Before saving, the global config key is removed so it is not persisted.
        :param str name: Name of the plugin
        :param config: instance of a py:class:`toolbox.config.PluginConfig`
:return:
"""
file_name = name + ConfigManager.FILE_EXT
path = os.path.join(self.config_dir, file_name)
if os.path.exists(path) and not os.path.isfile(path):
            raise Exception('{} exists but is not a file'.format(path))
if PluginConfig.GLOBAL_KEY in config:
del config[PluginConfig.GLOBAL_KEY]
with open(path, 'w') as f:
f.write(config.to_json())
def save(self, plugins):
"""
Convenience method to save a list of plugins. Only configs that have been modified since loading will be saved.
:param iterable plugins: list of instances of base class py:class:`toolbox.plugin.ToolboxPlugin`
:return:
"""
for plugin in plugins:
if isinstance(plugin, ConfigMixin):
conf = plugin.get_config()
if conf.modified:
self.save_plugin(plugin.name, conf)
class PluginConfig(object):
"""
Config container for plugin configs. Acts like a dictionary with some extra convenience methods.
The config has a special key for global configs which can be accessed with the 'get_global_config' method
"""
GLOBAL_KEY = '__GLOBAL__'
def __init__(self):
self._config = collections.defaultdict(lambda: None)
self.modified = False
def __getitem__(self, item):
return self._config[item]
def __setitem__(self, key, value):
        self.modified = self.modified or key != PluginConfig.GLOBAL_KEY  # never clear the flag on global-key writes
self._config[key] = value
def __delitem__(self, key):
        self.modified = self.modified or key != PluginConfig.GLOBAL_KEY
del self._config[key]
def __contains__(self, item):
return item in self._config
def __add__(self, other):
if not isinstance(other, PluginConfig):
return self
for key in other.keys():
            self.modified = self.modified or key != PluginConfig.GLOBAL_KEY
self[key] = other[key]
return self
def __sub__(self, other):
"""
Remove the keys of the other config
:param other:
:return:
"""
if self is other or not isinstance(other, PluginConfig):
return self
for key in other.keys():
if key in self:
                self.modified = self.modified or key != PluginConfig.GLOBAL_KEY
del self[key]
return self
def __len__(self):
return len(list(filter(lambda x: x != PluginConfig.GLOBAL_KEY,
self._config.keys())))
def set_global_config(self, config):
self[PluginConfig.GLOBAL_KEY] = config
def get_global_config(self):
return self[PluginConfig.GLOBAL_KEY]
def keys(self):
return self._config.keys()
def to_json(self):
"""
Converts the config values to a JSON string
:return: JSON string
:rtype: str
"""
return json.dumps(self._config, indent=True)
@classmethod
def create_from_dict(cls, dict):
"""
Factory method to create a PluginConfig from a python dictionary
:param dict:
:return: a PluginConfig
:rtype: py:class:`toolbox.config.PluginConfig`
"""
config = cls()
for k in dict:
config[k] = dict[k]
return config
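# Hedged usage sketch (not part of the original module); assumes the package is
# importable and that the default TOOLBOX_DIR/CONF_DIR locations are writable.
if __name__ == '__main__':
    manager = ConfigManager()
    config = manager.load_plugin('demo')     # missing file -> empty PluginConfig
    config['api_key'] = 'example-value'      # marks the config as modified
    if config.modified:
        manager.save_plugin('demo', config)  # writes CONF_DIR/demo.json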
|
isc
| 3,188,271,387,001,703,400
| 29.397661
| 119
| 0.585995
| false
| 4.267652
| true
| false
| false
|
flexpeace/btb
|
scanblog/scanning/views.py
|
1
|
31871
|
import json
import datetime
import tempfile
import logging
logger = logging.getLogger("django.request")
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Q
from django.shortcuts import get_object_or_404, render, redirect
from django.utils.translation import ugettext as _
from django.http import Http404, HttpResponseBadRequest
from django.contrib.auth.views import logout
from celery.result import AsyncResult
from scanblog.celery import app
from btb.utils import args_method_decorator, permission_required_or_deny, JSONView
from scanning import utils, tasks
from scanning.models import *
from scanning.forms import LockForm, TranscriptionForm, ScanUploadForm, \
FlagForm, get_org_upload_form
from annotations.models import Tag, Note, ReplyCode, handle_flag_spam
from annotations.tasks import send_flag_notification_email
from profiles.models import Organization, Profile, Affiliation
from comments.forms import CommentForm
def get_boolean(val):
return bool(val == "true" or val == "1")
class Scans(JSONView):
@permission_required_or_deny("scanning.change_scan")
def get(self, request, obj_id=None):
if obj_id:
scans = Scan.objects.filter(pk=obj_id)
else:
scans = Scan.objects.all().order_by('-created')
if request.GET.get("processing_complete"):
scans = scans.filter(
processing_complete=
get_boolean(request.GET.get("processing_complete"))
)
if request.GET.get("managed"):
try:
managed = bool(int(request.GET.get('managed')))
except ValueError:
managed = False
if managed:
scans = scans.filter(
Q(author__isnull=True) |
Q(author__profile__managed=True)
)
else:
scans = scans.filter(author__profile__managed=False)
if request.GET.get("editlock__isnull"):
scans = scans.filter(
editlock__isnull=
get_boolean(request.GET.get("editlock__isnull"))
)
# A scan can be valid two ways: 1 -- the author is in the moderator's
# orgs. 2 -- the scan's selected org is one of the author's orgs.
# Hence the "extra_q".
scans = scans.org_filter(request.user)
return self.paginated_response(request, scans)
class ScanSplits(JSONView):
"""
{
"scan": scan
"documents": [{
"id": document id
"type": type
"title": title or ""
"pages": [id,id,id,...]
}]
}
"""
def clean_params(self, request):
kw = json.loads(request.body)
return kw
@permission_required_or_deny("scanning.change_scan")
def get(self, request, obj_id=None):
try:
scan = Scan.objects.org_filter(request.user, pk=obj_id).get()
except Scan.DoesNotExist:
raise PermissionDenied
try:
lock = EditLock.objects.get(scan__pk=scan.pk)
if lock.user == request.user:
lock.save()
except EditLock.DoesNotExist:
lock = EditLock.objects.create(user=request.user, scan=scan)
tasks.expire_editlock.apply_async(args=[lock.id], countdown=60*5)
split = {
"scan": scan.to_dict(),
"documents": [],
"lock": lock.to_dict() if lock.user_id != request.user.id else None
}
# This will select a duplicate document for each scan page it contains.
documents = Document.objects.order_by(
'documentpage__scan_page__order'
).distinct().filter(scan__pk=scan.pk)
# Since we got duplicates, filter them down here.
visited = set()
for doc in documents:
if doc.id in visited:
continue
visited.add(doc.id)
split['documents'].append({
"id": doc.pk,
"type": doc.type,
"title": doc.title,
"status": doc.status,
"pages": list(doc.documentpage_set.order_by("order").values_list("scan_page__pk", flat=True))
})
return self.json_response(split)
@permission_required_or_deny("scanning.change_scan", "scanning.add_document",
"scanning.change_document", "scanning.delete_document")
def post(self, request, obj_id=None):
"""
        Execute splits for a scan. This could be updating existing models,
or creating new ones.
"""
logger.debug("Starting split")
with transaction.atomic():
try:
scan = Scan.objects.org_filter(request.user, pk=obj_id).get()
except Scan.DoesNotExist:
raise PermissionDenied
params = self.clean_params(request)
#
# Save scan.
#
try:
scan.author = Profile.objects.org_filter(request.user,
pk=params["scan"]["author"]["id"]
).get().user
scan.processing_complete = params["scan"]["processing_complete"]
except (KeyError, TypeError, Profile.DoesNotExist):
# Processing complete is always False if there is no author; hence
# two cases in the try block.
scan.author = None
scan.processing_complete = False
scan.save()
# Set pending scan.
ps_code = (params['scan'].pop("pendingscan_code", None) or "").strip()
try:
has_ps = bool(scan.pendingscan)
except PendingScan.DoesNotExist:
has_ps = False
if not ps_code and has_ps:
# Remove the cached pendingscan reference. Ugh. (simply setting
# scan.pendingscan to None raises an error)
ps = scan.pendingscan
ps.scan = None
ps.completed = None
ps.save()
scan = Scan.objects.get(pk=scan.pk)
elif ps_code:
try:
ps = PendingScan.objects.org_filter(
request.user, code=ps_code.strip()
).get()
except PendingScan.DoesNotExist:
pass
else:
if ps.scan_id != scan.id:
ps.scan = scan
ps.completed = datetime.datetime.now()
ps.save()
#
# Split documents
#
docs = []
for doc in params["documents"]:
if ("pages" not in doc) or (len(doc["pages"]) == 0):
# Delete stale document.
if "id" in doc:
try:
Document.objects.org_filter(
request.user, pk=doc["id"]
).get().full_delete()
except Document.DoesNotExist:
pass
continue
if "id" in doc:
# Retrieve existing document.
try:
document = Document.objects.org_filter(
request.user, pk=doc["id"]
).get()
except Document.DoesNotExist:
raise PermissionDenied
else:
# Create new document.
if doc["type"] in ("request", "license"):
status = "unpublishable"
else:
status = "unknown"
document = Document.objects.create(
scan=scan,
editor=request.user,
author=scan.author,
type=doc["type"],
status=status,
)
# Create tickets
if doc["type"] == "request":
Note.objects.create(
text="Request from scan.",
document=document,
resolved=None,
creator=request.user,
)
elif doc["type"] == "license" and \
not document.author.profile.consent_form_received:
Note.objects.create(
text="Please check this license agreement, then update the user's license status accordingly.",
document=document,
resolved=None,
creator=request.user,
)
# Apportion pages.
pages = []
# We need to transfer old page transforms to new document pages,
# indexed by the scanpage_id, which persists.
old_page_transformations = {}
# ... and do the same for highlight_transform -- we need to change
# the documentpage_id to the new documentpage_id.
if document.highlight_transform:
old_highlight_transform = json.loads(document.highlight_transform)
else:
old_highlight_transform = ""
highlight_scan_page_id = None
# Loop through current pages to get info to transfer to new pages,
# and delete the old pages.
for page in document.documentpage_set.all():
old_page_transformations[page.scan_page_id] = page.transformations
# Capture the old highlight transform's scan page ID.
if old_highlight_transform and \
page.pk == old_highlight_transform["document_page_id"]:
highlight_scan_page_id = page.scan_page_id
page.full_delete()
# Clear the highlight transform so that it remains 'valid' even if
# something goes wrong in identifying it with an old scan_page_id.
document.highlight_transform = ""
# Recreate the new pages, reusing the old transforms.
for order,scanpage_id in enumerate(doc["pages"]):
documentpage = DocumentPage.objects.create(
document=document,
scan_page=ScanPage.objects.get(pk=scanpage_id),
order=order,
transformations=old_page_transformations.get(scanpage_id, "{}"),
)
# Reuse the old highlight transform, if it matches.
if scanpage_id == highlight_scan_page_id:
old_highlight_transform["document_page_id"] = documentpage.pk
document.highlight_transform = json.dumps(old_highlight_transform)
document.save()
document.documentpage_set = pages
docs.append(document)
scan.document_set = docs
# Must do update_document_images outside transaction.atomic
for document in docs:
if document.status in ("published", "ready"):
# Persist any changes to highlight_transform.
tasks.update_document_images.delay(document.pk).get()
#XXX Shouldn't be necessary but seems to be.
scan.save()
return self.get(request, obj_id=scan.pk)
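# For reference (ids and values below are illustrative): the JSON body that post()
# above consumes looks roughly like
#   {"scan": {"author": {"id": 34}, "processing_complete": true,
#             "pendingscan_code": "abc123"},
#    "documents": [{"id": 56, "type": "post", "pages": [101, 102]},
#                  {"type": "license", "pages": [103]}]}
# Documents with an empty "pages" list are deleted, and documents without an "id"
# are created from scratch.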
class MissingHighlight(Exception):
pass
class Documents(JSONView):
def clean_params(self, request):
kw = json.loads(request.body)
return kw
@permission_required_or_deny("scanning.change_document")
def get(self, request, obj_id=None):
docs = Document.objects.org_filter(request.user)
g = request.GET.get
if g("author__profile__managed", 0) == "1":
docs = docs.filter(author__profile__managed=True)
if g("author_id", None):
docs = docs.filter(author__pk=g("author_id"))
if g("type", None):
docs = docs.filter(type=g("type"))
if g("idlist", None):
ids = [a for a in g("idlist").split(".") if a]
if not ids:
raise Http404
docs = [b for a,b in sorted(docs.in_bulk(ids).items())]
if g("status", None):
docs = docs.filter(status=g("status"))
#TODO: EditLock's for documents.
return self.paginated_response(request, docs)
@permission_required_or_deny("scanning.change_document")
def put(self, request, obj_id=None):
try:
with transaction.atomic():
try:
doc = Document.objects.org_filter(request.user, pk=obj_id).get()
except Document.DoesNotExist:
raise PermissionDenied
kw = self.clean_params(request)
try:
doc.author = Profile.objects.org_filter(
request.user,
pk=kw['author']['id']
).get().user
except Profile.DoesNotExist:
raise PermissionDenied
doc.editor = request.user
doc.title = kw['title']
if doc.type == "post":
try:
assert len(kw['highlight_transform']['crop']) > 0
except (AssertionError, KeyError):
raise MissingHighlight
doc.highlight_transform = json.dumps(kw['highlight_transform'])
if not kw['in_reply_to']:
doc.in_reply_to = None
else:
reply_code = ReplyCode.objects.get(code__iexact=kw['in_reply_to'])
# Avoid recursion.
if reply_code.pk != doc.reply_code.pk:
doc.in_reply_to = reply_code
else:
doc.in_reply_to = None
# Set affiliation, if any
try:
doc.affiliation = Affiliation.objects.org_filter(request.user).get(
pk=kw['affiliation']['id'])
except (TypeError, KeyError, Affiliation.DoesNotExist):
doc.affiliation = None
doc.adult = kw['adult']
# Ensure other processes won't try to serve this until we're done building.
doc.date_written = kw['date_written']
doc.status = "unknown"
doc.save()
# tags
tags = []
for name in kw['tags'].split(';'):
name = name.strip()
if name:
tag, created = Tag.objects.get_or_create(name=name.strip().lower())
tags.append(tag)
doc.tags = tags
# pages
order_changed = []
for page in kw['pages']:
docpage = doc.documentpage_set.get(pk=page['id'])
transformations = json.dumps(page['transformations'] or "")
if docpage.transformations != transformations:
docpage.transformations = transformations
docpage.save()
if page['order'] != docpage.order:
# Save a nonsensical order to avoid constraint clash, set
# correct order, but don't save until we're all done.
docpage.order = -docpage.order - 1
docpage.save()
docpage.order = page['order']
order_changed.append(docpage)
for page in order_changed:
page.save()
except MissingHighlight:
return HttpResponseBadRequest("Missing highlight.")
#XXX this additional save should not be needed, but seems to be. Issue
# with transaction.atomic() ?
doc.save()
# Split images.
result = tasks.update_document_images.delay(
document_id=doc.pk, status=kw['status']).get()
logger.debug(u"post image update {}".format(doc.highlight_transform))
# Update to get current status after task finishes.
doc = Document.objects.get(pk=doc.pk)
response = self.json_response(doc.to_dict())
return response
#
# Pending scan CRUD
#
class PendingScans(JSONView):
@permission_required_or_deny("scanning.change_pendingscan")
def get(self, request, obj_id=None):
if obj_id:
pendingscans = PendingScan.objects.filter(pk=obj_id)
elif "missing" in request.GET:
pendingscans = PendingScan.objects.missing()
elif "pending" in request.GET:
pendingscans = PendingScan.objects.pending()
elif "fulfilled" in request.GET:
pendingscans = PendingScan.objects.fulfilled()
else:
pendingscans = PendingScan.objects.all()
if "author_id" in request.GET:
pendingscans = pendingscans.filter(author__pk=request.GET["author_id"])
pendingscans = pendingscans.org_filter(request.user)
return self.paginated_response(request, pendingscans)
@permission_required_or_deny("scanning.add_pendingscan")
def post(self, request, obj_id=None):
params = json.loads(request.body)
try:
org = Organization.objects.org_filter(
request.user, pk=params["org_id"]
).get()
except Organization.DoesNotExist:
raise PermissionDenied
try:
author = Profile.objects.org_filter(
request.user, pk=params["author_id"]
).get().user
except Profile.DoesNotExist:
raise PermissionDenied
pendingscan = PendingScan.objects.create(
editor=self.request.user,
author=author,
org=org,
)
return self.json_response(pendingscan.to_dict())
@permission_required_or_deny("scanning.change_pendingscan")
def put(self, request, obj_id=None):
try:
ps = PendingScan.objects.org_filter(
request.user, pk=obj_id
).get()
except PendingScan.DoesNotExist:
raise PermissionDenied
params = json.loads(request.body)
if 'missing' in params:
if params['missing'] == 1:
ps.completed = datetime.datetime.now()
else:
ps.completed = None
ps.save()
return self.json_response(ps.to_dict())
@permission_required_or_deny("scanning.delete_scan")
def delete(self, request, obj_id=None):
try:
ps = PendingScan.objects.org_filter(
request.user, pk=obj_id
).get()
except PendingScan.DoesNotExist:
raise PermissionDenied
ps.delete()
return self.json_response(ps.to_dict())
class ScanCodes(JSONView):
def get(self, request):
if "term" not in request.GET:
raise Http404
pss = PendingScan.objects.org_filter(
request.user,
code__icontains=request.GET.get("term"),
scan__isnull=True,
)
return self.json_response([ps.to_dict() for ps in pss])
@permission_required("scanning.add_scan")
def scan_add(request):
"""Displays a form for uploading a scan."""
FormClass = get_org_upload_form(request.user)
form = FormClass(request.POST or None, request.FILES or None, types={
"pdf": "application/pdf",
"zip": "application/zip",
})
if form.is_valid():
if request.FILES['file'].name.lower().endswith(".zip"):
with tempfile.NamedTemporaryFile(delete=False, suffix="scans.zip") as fh:
for chunk in request.FILES['file'].chunks():
fh.write(chunk)
fh.flush()
task_id = tasks.process_zip.delay(filename=fh.name,
uploader_id=request.user.pk,
org_id=form.cleaned_data['organization'].pk,
redirect=reverse("moderation.home")
)
else:
path = tasks.move_scan_file(uploaded_file=request.FILES['file'])
scan = Scan.objects.create(
uploader=request.user,
pdf=os.path.relpath(path, settings.MEDIA_ROOT),
under_construction=True,
org=form.cleaned_data['organization'])
task_id = tasks.split_scan.delay(scan_id=scan.pk,
redirect=reverse("moderation.edit_scan", args=[scan.pk]))
return redirect('moderation.wait_for_processing', task_id)
return render(request, "scanning/upload.html", {'form': form})
@permission_required("scanning.change_scan")
def scan_merge(request, scan_id):
""" Merge an existing scan with a new file """
try:
scan = Scan.objects.org_filter(request.user, pk=scan_id).get()
except Scan.DoesNotExist:
raise Http404
form = ScanUploadForm(request.POST or None, request.FILES or None, types={
'pdf': 'application/pdf',
})
if form.is_valid():
with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as fh:
for chunk in request.FILES['file'].chunks():
fh.write(chunk)
name = fh.name
task_id = tasks.merge_scans.delay(
scan_id=scan_id,
filename=name,
redirect=reverse("moderation.edit_scan", args=[scan.pk])
)
return redirect("moderation.wait_for_processing", task_id)
return render(request, "scanning/merge.html", {'form': form})
@permission_required("scanning.change_scan")
def scan_replace(request, scan_id=None):
try:
scan = Scan.objects.org_filter(request.user, pk=scan_id).get()
except Scan.DoesNotExist:
raise PermissionDenied
form = ScanUploadForm(request.POST or None, request.FILES or None, types={
"pdf": "application/pdf",
})
if form.is_valid():
filepath = tasks.move_scan_file(uploaded_file=request.FILES['file'])
scan.full_delete(filesonly=True)
scan.uploader = request.user
scan.pdf = os.path.relpath(filepath, settings.MEDIA_ROOT)
scan.save()
task_id = tasks.split_scan.delay(
scan_id=scan.pk,
redirect=reverse("moderation.edit_scan", args=[scan.pk])
)
return redirect('moderation.wait_for_processing', task_id)
return render(request, "scanning/replace.html", {'form': form})
@permission_required("scanning.delete_scan")
def scan_delete(request, scan_id=None):
try:
scan = Scan.objects.org_filter(request.user, pk=scan_id).get()
except Scan.DoesNotExist:
raise PermissionDenied
if request.method != "POST":
return render(request, "scanning/delete.html", {
'scan': scan
})
scan.full_delete()
messages.info(request, "Scan deleted.")
return redirect(reverse("moderation.home") + "#/process")
@permission_required("scanning.delete_document")
def doc_delete(request, document_id=None):
try:
doc = Document.objects.org_filter(request.user, pk=document_id).get()
except Document.DoesNotExist:
raise PermissionDenied
if request.method != 'POST':
return redirect(reverse("moderation.edit_doc", document_id))
doc.full_delete()
messages.info(request, "Document deleted.")
return redirect(reverse("moderation.home") + "#/process")
@permission_required('scanning.change_scan')
def scan_reimport(request, scan_id=None):
try:
scan = Scan.objects.org_filter(request.user, pk=scan_id).get()
except Scan.DoesNotExist:
raise PermissionDenied
if request.method != "POST":
return render(request, "scanning/reimport.html", {
'scan': scan
})
task_id = tasks.process_scan.delay(
scan_id=scan_id,
redirect=reverse("moderation.home") + \
"#/process/scan/%s" % scan.id
).task_id
return redirect("moderation.wait_for_processing", task_id)
#
# Transcriptions
#
@permission_required('scanning.change_transcription')
def transcribe_document(request, document_id):
"""Show and process the form for editing a transcription."""
if not settings.TRANSCRIPTION_OPEN:
raise Http404
document = get_object_or_404(Document, pk=document_id)
if not document.scan_id:
raise Http404
can_lock = request.user.has_perm('scanning.change_locked_transcription')
try:
transcription = document.transcription
except Transcription.DoesNotExist:
transcription = Transcription(document=document)
if transcription.locked and not can_lock:
raise PermissionDenied
if can_lock:
lockform = LockForm(request.POST or None)
else:
lockform = ''
current = transcription.current()
if current:
initial = {'body': current.body, 'complete': transcription.complete}
else:
initial = None
form = TranscriptionForm(request.POST or None, initial=initial)
if form.is_valid():
if lockform and lockform.is_valid():
transcription.locked = lockform.cleaned_data['lock_transcription']
transcription.save()
# "sugar" is a honeypot for spam
if form.has_changed() and not request.POST.get("sugar", None):
# Don't add a revision for rapid changes.
cutoff = datetime.datetime.now() - datetime.timedelta(seconds=120)
transcription.complete = form.cleaned_data.get('complete', False)
transcription.save()
if (current and current.editor == request.user and
cutoff < current.modified):
current.body = form.cleaned_data['body']
current.save()
else:
if not current or current.body != form.cleaned_data['body']:
current = TranscriptionRevision.objects.create(
revision=current.revision + 1 if current else 0,
transcription=transcription,
body=form.cleaned_data['body'],
editor=request.user
)
messages.success(request, _("Thanks for your attention to detail. Transcription updated."))
if document.type == "post":
return redirect("scanning.after_transcribe_comment", document_id=document.pk)
return redirect(document.get_absolute_url() + "#transcription")
pages = document.documentpage_set.all()
return render(request, "scanning/transcription_edit.html", {
'lockform': lockform,
'transcription': transcription,
'document': document,
'documentpages': pages,
'documentpage_count': pages.count(),
'form': form,
'cancel_link': document.get_absolute_url(),
})
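# Note on the revision handling above: the hidden "sugar" field is a spam honeypot,
# edits by the same editor within 120 seconds overwrite the current revision in
# place, and a new TranscriptionRevision (revision + 1) is only created when the
# submitted body actually differs from the current one.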
@permission_required("scanning.change_transcription")
def after_transcribe_comment(request, document_id):
"""
Prompt for a comment after a transcription is done.
"""
document = get_object_or_404(Document, pk=document_id,
type="post",
scan__isnull=False,
transcription__isnull=False)
# Don't prompt for comment if they've already commented on this post.
if document.comments.filter(user=request.user).exists() or \
(not settings.COMMENTS_OPEN) or \
document.author.profile.comments_disabled:
return redirect(document.get_absolute_url() + "#transcription")
if document.transcription.complete:
prompt_text = "Thanks for writing! I finished the transcription for your post."
else:
prompt_text = "Thanks for writing! I worked on the transcription for your post."
form = CommentForm(request.POST or None, initial={
'comment': prompt_text
})
if form.is_valid():
comment, created = Comment.objects.get_or_create(
document=document,
comment=form.cleaned_data['comment'],
user=request.user,
)
if created:
comment.document = document
return redirect("%s#%s" % (request.path, comment.pk))
return render(request, "scanning/after_transcribe_comment.html", {
'document': document,
'form': form,
})
def revision_list(request, document_id):
"""
Main revision display.
"""
doc = get_object_or_404(Document, pk=document_id)
if doc.status != "published":
raise Http404
try:
revisions = list(doc.transcription.revisions.all())
except Transcription.DoesNotExist:
revisions = []
return render(request, "scanning/revision_list.html", {
'document' : doc,
'revisions': revisions,
})
def revision_compare(request, document_id):
"""
AJAX comparison between two revisions
"""
try:
document = Document.objects.get(pk=document_id)
earliest = TranscriptionRevision.objects.get(
transcription__document=document,
revision=int(request.GET['earliest']))
latest = TranscriptionRevision.objects.get(
transcription__document=document,
revision=int(request.GET['latest']))
except (KeyError, Document.DoesNotExist, TranscriptionRevision.DoesNotExist):
raise
return render(request, "scanning/_column_diff.html", {
'document': document, 'earliest': earliest, 'latest': latest,
})
@login_required
def flag_document(request, document_id):
"""
Flag a post.
"""
if not request.user.is_active:
raise PermissionDenied
doc = get_object_or_404(Document, pk=document_id)
form = FlagForm(request.POST or None)
if form.is_valid():
if handle_flag_spam(request.user, form.cleaned_data['reason']):
messages.info(request, _(u"Your account has been suspended due to behavior that looks like spam. If this is an error, please contact us using the contact link at the bottom of the page."))
logout(request)
return redirect("/")
ticket, created = Note.objects.get_or_create(
creator=request.user,
text="FLAG from user. \n %s" % form.cleaned_data['reason'],
resolved=None,
important=form.cleaned_data['urgent'],
document=doc,
)
# Queue up an async process to send notification email in 2 minutes (we
# delay to trap spam floods).
if created:
send_flag_notification_email.apply_async(
args=[ticket.pk], countdown=120)
messages.info(request, _(u"A moderator will review that post shortly. Thanks for helping us run a tight ship."))
return redirect(doc.get_absolute_url())
# redirect to confirmation.
return render(request, "scanning/flag.html", {
'form': form,
})
|
agpl-3.0
| -3,211,643,835,057,357,300
| 38.739401
| 200
| 0.555395
| false
| 4.451257
| false
| false
| false
|
yuntae1000/SoftwareDesignFall15
|
Mini_project1/MP1_postfeedback.py
|
1
|
3420
|
# code to mine the urls from google and save them to local .txt files
# using pattern.web to search Google
# I integrated all files into one single file after the feedback,
# using functions and readable documentation
from pattern.web import Google
import indicoio
indicoio.config.api_key = '8d05933c4c2ca769d1e064dfbea1fe8a'
#save the results of the analysis in the "stats.txt" file.
# declare arrays which save raw url mined from pattern.search
# new york times urls, cbs new urls, wallstreet journal urls, foxnew urls
rawurl_nytimes=[]
rawurl_cbsnews=[]
rawurl_wsj=[]
rawurl_foxnews=[]
journal_names=['nytimes', 'cbsnews', 'wsj', 'foxnews']
rawurls=[rawurl_nytimes, rawurl_cbsnews, rawurl_wsj, rawurl_foxnews]
result_page=4
ny_analysis=[]
cbs_analysis=[]
wsj_analysis=[]
foxnews_analysis=[]
analysis=[ny_analysis,cbs_analysis, wsj_analysis,foxnews_analysis]
folders=["url_nytimes.txt", "url_cbsnews.txt", "url_wsj.txt", "url_foxnews.txt"]
g=Google()
#get the New York Times url
def get_articles(journal_num):
for i in range(1,result_page):
        # search google results corresponding to the following keyword
for result in g.search('Donald Trump opinion site:'+journal_names[journal_num]+'.com', start=i):
rawurls[journal_num].append(result.url)
# saves the harvested urls to a local file in order to reduce the number of queries
# we will use this file for the analysis later on
def saveinfile(journal_num):
f=open('url_'+journal_names[journal_num]+'.txt', "w")
print >>f, rawurls[journal_num]
f.close()
def get_save_articles(journal_num):
get_articles(journal_num)
saveinfile(journal_num)
# then you can write a "master function" at the end which chains all of the steps
# in your process together
## get and save articles from all 4 medias
# mini project: open the url files which we saved from harvesting and execute the political analysis
##########################################
#for each files split the string by comma from the array
def analyze_text(journal_num):
f= open(folders[journal_num], 'r')
line=f.readline()
    url_dummy=line.split(',') # dummy -> list of urls; get all urls from the saved file
for i in range(len(url_dummy)-1):
# get rid of useless html.
url_dummy[i]=url_dummy[i][3:-1]
    url_dummy[-1]=url_dummy[-1][3:-2] ## because the last url has one more ' , get rid of it
    ## do political analysis on each url with the indicoio API and append the result to the array
for j in range(len(url_dummy)):
analysis[journal_num].append(indicoio.political(url_dummy[j]))
f.close()
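# Each entry appended above is the dict returned by indicoio.political(), e.g.
# (values illustrative): {"Libertarian": 0.08, "Green": 0.12, "Liberal": 0.46,
# "Conservative": 0.34}; political_analysis() below averages these per journal.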
## get the average of the analysis
## add all the results of the urls and divide with the number of urls
def political_analysis(journal_num):
sum_stats=[0,0,0,0] #sum of all stats gained from indicoio
    for i in range(len(analysis[journal_num])): # iterate over every analyzed url of this journal
sum_stats[0]=sum_stats[0]+analysis[journal_num][i]["Libertarian"]
sum_stats[1]=sum_stats[1]+analysis[journal_num][i]["Green"]
sum_stats[2]=sum_stats[2]+analysis[journal_num][i]["Liberal"]
sum_stats[3]=sum_stats[3]+analysis[journal_num][i]["Conservative"]
aver_stats=[0,0,0,0]
for i in range(4):
        aver_stats[i]=sum_stats[i]/float(len(analysis[journal_num])) # divide by the number of urls to get the average
print journal_names[journal_num]+" [Libertarian , Green , Liberal , Conservative]"
print aver_stats
# get_save_articles(0)
# get_save_articles(1)
# get_save_articles(2)
# get_save_articles(3)
for i in range(4):
get_save_articles(i)
analyze_text(i)
political_analysis(i)
|
mit
| -1,029,171,061,040,329,300
| 30.376147
| 98
| 0.713158
| false
| 2.971329
| false
| false
| false
|
ecotux/objectDetection
|
04saveSVM.py
|
1
|
1784
|
import cv2
import numpy as np
import os
import re
#############################################
#
# Gray magic:
# - the value of "C"
# - the value of "gamma"
# - the functions "preprocess*"
#
C = 20
gamma = 0.0005
#
# blurring image
#
def preprocess1(data):
img = cv2.GaussianBlur(data, (5,5), 0)
img = cv2.bilateralFilter(img,9,75,75)
img = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
return img
#
# data feature extraction
#
def preprocess2(data):
YCrCb = cv2.cvtColor(data, cv2.COLOR_BGR2YCR_CB)
normalized = np.ravel(np.float32(YCrCb)) / 255.0
return normalized[76800:]
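# Note: 76800 == 80 * 320 * 3, so the slice above drops the flattened values of the
# top 80 rows of the 240x320 frame and keeps the lower 160 rows (Y, Cr and Cb per
# pixel) as the feature vector.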
#############################################
#
# Main
#
#############################################
if __name__ == '__main__':
xRes = 320
yRes = 240
dirTrain = 'trainData/'
params = dict( kernel_type = cv2.SVM_RBF, svm_type = cv2.SVM_C_SVC, C = C, gamma = gamma )
# Loading Training Set
print "Loading Training Set..."
numTrainSamples = len([name for name in os.listdir(dirTrain)])
trainSamples = np.empty( (numTrainSamples, yRes, xRes, 3), dtype = np.uint8 )
targets = np.empty( numTrainSamples, dtype = np.float32 )
for i, nameFile in enumerate(os.listdir(dirTrain)):
match1=re.search(r"object(\d+)",nameFile)
if match1:
trainSamples[i] = cv2.imread(dirTrain+nameFile)
targets[i] = np.float32(match1.group(1))
# Preprocessing Training Set
print 'Preprocessing Training Set...'
trainSet = np.array([preprocess2(preprocess1(trainSamples[i])) for i in np.ndindex(trainSamples.shape[:1])])
# Training
print 'Training SVM...'
model = cv2.SVM()
model.train(trainSet, targets, params = params)
# Saving
print 'saving SVM...'
model.save("objectSVM.xml")
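# Hedged usage sketch (not part of this script): loading the saved model later and
# classifying a single preprocessed frame with the OpenCV 2.4 SVM API; the file name
# "frame.png" is hypothetical.
#   model = cv2.SVM()
#   model.load("objectSVM.xml")
#   sample = preprocess2(preprocess1(cv2.imread("frame.png")))
#   label = model.predict(sample)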
|
mit
| -5,204,423,392,927,158,000
| 20.582278
| 109
| 0.598094
| false
| 2.910277
| false
| false
| false
|
zeroq/amun
|
vuln_modules/vuln-mydoom/mydoom_modul.py
|
1
|
3864
|
"""
[Amun - low interaction honeypot]
Copyright (C) [2014] [Jan Goebel]
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>
"""
try:
import psyco ; psyco.full()
from psyco.classes import *
except ImportError:
pass
import struct
import random
import mydoom_shellcodes
class vuln:
def __init__(self):
try:
self.vuln_name = "MYDOOM Vulnerability"
self.stage = "MYDOOM_STAGE1"
self.welcome_message = ""
self.shellcode = []
except KeyboardInterrupt:
raise
def print_message(self, data):
print "\n"
counter = 1
for byte in data:
if counter==16:
ausg = hex(struct.unpack('B',byte)[0])
if len(ausg) == 3:
list = str(ausg).split('x')
ausg = "%sx0%s" % (list[0],list[1])
print ausg
else:
print ausg
counter = 0
else:
ausg = hex(struct.unpack('B',byte)[0])
if len(ausg) == 3:
list = str(ausg).split('x')
ausg = "%sx0%s" % (list[0],list[1])
print ausg,
else:
print ausg,
counter += 1
print "\n>> Incoming Codesize: %s\n\n" % (len(data))
def getVulnName(self):
return self.vuln_name
def getCurrentStage(self):
return self.stage
def getWelcomeMessage(self):
return self.welcome_message
def incoming(self, message, bytes, ip, vuLogger, random_reply, ownIP):
try:
self.reply = []
for i in range(0,62):
try:
self.reply.append("\x00")
except KeyboardInterrupt:
raise
resultSet = {}
resultSet['vulnname'] = self.vuln_name
resultSet['result'] = False
resultSet['accept'] = False
resultSet['shutdown'] = False
resultSet['reply'] = "None"
resultSet['stage'] = self.stage
resultSet['shellcode'] = "None"
resultSet['isFile'] = False
if self.stage=="MYDOOM_STAGE1" and bytes==5:
if mydoom_shellcodes.mydoom_request_stage1==message:
resultSet['result'] = True
resultSet['accept'] = True
self.stage = "SHELLCODE"
return resultSet
elif self.stage=="MYDOOM_STAGE1" and (bytes==1024 or bytes==541 or bytes==645):
resultSet['result'] = True
resultSet['accept'] = True
#resultSet['reply'] = "".join(self.reply)
self.shellcode.append(message)
self.stage = "SHELLCODE"
#resultSet['shellcode'] = "".join(self.shellcode)
return resultSet
elif self.stage=="MYDOOM_STAGE1" and message.startswith('GET'):
resultSet['result'] = False
resultSet['accept'] = True
resultSet['shutdown'] = True
self.stage = "SHELLCODE"
return resultSet
elif self.stage=="SHELLCODE":
if bytes>0:
resultSet['result'] = True
resultSet['accept'] = True
#resultSet['reply'] = "".join(self.reply)
self.shellcode.append(message)
self.stage = "SHELLCODE"
#resultSet['shellcode'] = "".join(self.shellcode)
return resultSet
else:
resultSet['result'] = False
resultSet['accept'] = True
resultSet['isFile'] = True
resultSet['reply'] = "None"
self.shellcode.append(message)
resultSet['shellcode'] = "".join(self.shellcode)
return resultSet
else:
resultSet['result'] = False
resultSet['accept'] = False
resultSet['reply'] = "None"
return resultSet
return resultSet
except KeyboardInterrupt:
raise
except StandardError, e:
print e
return resultSet
except:
print "MYDOOM FATAL ERROR!"
|
gpl-2.0
| -7,833,382,891,770,390,000
| 28.272727
| 239
| 0.659161
| false
| 3.151713
| false
| false
| false
|
jgeskens/django
|
django/core/management/validation.py
|
1
|
23298
|
import collections
import sys
from django.conf import settings
from django.core.management.color import color_style
from django.utils.encoding import force_str
from django.utils.itercompat import is_iterable
from django.utils import six
class ModelErrorCollection:
def __init__(self, outfile=sys.stdout):
self.errors = []
self.outfile = outfile
self.style = color_style()
def add(self, context, error):
self.errors.append((context, error))
self.outfile.write(self.style.ERROR(force_str("%s: %s\n" % (context, error))))
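# Hedged usage sketch (not part of this module): callers such as the old `validate`
# management command drive this file roughly as
#   num_errors = get_validation_errors(sys.stdout, app=None)
#   if num_errors:
#       ...  # report failure / abort
# get_validation_errors() below records one e.add(...) per problem found, so the
# number it returns matches the number of messages written to outfile.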
def get_validation_errors(outfile, app=None):
"""
Validates all models that are part of the specified app. If no app name is provided,
validates all models of all installed apps. Writes errors, if any, to outfile.
Returns number of errors.
"""
from django.db import models, connection
from django.db.models.loading import get_app_errors
from django.db.models.deletion import SET_NULL, SET_DEFAULT
e = ModelErrorCollection(outfile)
for (app_name, error) in get_app_errors().items():
e.add(app_name, error)
for cls in models.get_models(app, include_swapped=True):
opts = cls._meta
# Check swappable attribute.
if opts.swapped:
try:
app_label, model_name = opts.swapped.split('.')
except ValueError:
e.add(opts, "%s is not of the form 'app_label.app_name'." % opts.swappable)
continue
if not models.get_model(app_label, model_name):
e.add(opts, "Model has been swapped out for '%s' which has not been installed or is abstract." % opts.swapped)
# No need to perform any other validation checks on a swapped model.
continue
# If this is the current User model, check known validation problems with User models
if settings.AUTH_USER_MODEL == '%s.%s' % (opts.app_label, opts.object_name):
# Check that the USERNAME FIELD isn't included in REQUIRED_FIELDS.
if cls.USERNAME_FIELD in cls.REQUIRED_FIELDS:
e.add(opts, 'The field named as the USERNAME_FIELD should not be included in REQUIRED_FIELDS on a swappable User model.')
# Check that the username field is unique
if not opts.get_field(cls.USERNAME_FIELD).unique:
e.add(opts, 'The USERNAME_FIELD must be unique. Add unique=True to the field parameters.')
# Model isn't swapped; do field-specific validation.
for f in opts.local_fields:
if f.name == 'id' and not f.primary_key and opts.pk.name == 'id':
e.add(opts, '"%s": You can\'t use "id" as a field name, because each model automatically gets an "id" field if none of the fields have primary_key=True. You need to either remove/rename your "id" field or add primary_key=True to a field.' % f.name)
if f.name.endswith('_'):
e.add(opts, '"%s": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.' % f.name)
if (f.primary_key and f.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
e.add(opts, '"%s": Primary key fields cannot have null=True.' % f.name)
if isinstance(f, models.CharField):
try:
max_length = int(f.max_length)
if max_length <= 0:
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
except (ValueError, TypeError):
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
if isinstance(f, models.DecimalField):
decimalp_ok, mdigits_ok = False, False
decimalp_msg = '"%s": DecimalFields require a "decimal_places" attribute that is a non-negative integer.'
try:
decimal_places = int(f.decimal_places)
if decimal_places < 0:
e.add(opts, decimalp_msg % f.name)
else:
decimalp_ok = True
except (ValueError, TypeError):
e.add(opts, decimalp_msg % f.name)
mdigits_msg = '"%s": DecimalFields require a "max_digits" attribute that is a positive integer.'
try:
max_digits = int(f.max_digits)
if max_digits <= 0:
e.add(opts, mdigits_msg % f.name)
else:
mdigits_ok = True
except (ValueError, TypeError):
e.add(opts, mdigits_msg % f.name)
invalid_values_msg = '"%s": DecimalFields require a "max_digits" attribute value that is greater than or equal to the value of the "decimal_places" attribute.'
if decimalp_ok and mdigits_ok:
if decimal_places > max_digits:
e.add(opts, invalid_values_msg % f.name)
if isinstance(f, models.FileField) and not f.upload_to:
e.add(opts, '"%s": FileFields require an "upload_to" attribute.' % f.name)
if isinstance(f, models.ImageField):
try:
from django.utils.image import Image
except ImportError:
e.add(opts, '"%s": To use ImageFields, you need to install Pillow. Get it at https://pypi.python.org/pypi/Pillow.' % f.name)
if isinstance(f, models.BooleanField) and getattr(f, 'null', False):
e.add(opts, '"%s": BooleanFields do not accept null values. Use a NullBooleanField instead.' % f.name)
if isinstance(f, models.FilePathField) and not (f.allow_files or f.allow_folders):
e.add(opts, '"%s": FilePathFields must have either allow_files or allow_folders set to True.' % f.name)
if f.choices:
if isinstance(f.choices, six.string_types) or not is_iterable(f.choices):
e.add(opts, '"%s": "choices" should be iterable (e.g., a tuple or list).' % f.name)
else:
for c in f.choices:
if not isinstance(c, (list, tuple)) or len(c) != 2:
e.add(opts, '"%s": "choices" should be a sequence of two-tuples.' % f.name)
if f.db_index not in (None, True, False):
e.add(opts, '"%s": "db_index" should be either None, True or False.' % f.name)
# Perform any backend-specific field validation.
connection.validation.validate_field(e, opts, f)
# Check if the on_delete behavior is sane
if f.rel and hasattr(f.rel, 'on_delete'):
if f.rel.on_delete == SET_NULL and not f.null:
e.add(opts, "'%s' specifies on_delete=SET_NULL, but cannot be null." % f.name)
elif f.rel.on_delete == SET_DEFAULT and not f.has_default():
e.add(opts, "'%s' specifies on_delete=SET_DEFAULT, but has no default value." % f.name)
# Check to see if the related field will clash with any existing
# fields, m2m fields, m2m related objects or related objects
if f.rel:
if f.rel.to not in models.get_models():
# If the related model is swapped, provide a hint;
# otherwise, the model just hasn't been installed.
if not isinstance(f.rel.to, six.string_types) and f.rel.to._meta.swapped:
e.add(opts, "'%s' defines a relation with the model '%s.%s', which has been swapped out. Update the relation to point at settings.%s." % (f.name, f.rel.to._meta.app_label, f.rel.to._meta.object_name, f.rel.to._meta.swappable))
else:
e.add(opts, "'%s' has a relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, six.string_types):
continue
# Make sure the related field specified by a ForeignKey is unique
if f.requires_unique_target:
if len(f.foreign_related_fields) > 1:
has_unique_field = False
for rel_field in f.foreign_related_fields:
has_unique_field = has_unique_field or rel_field.unique
if not has_unique_field:
e.add(opts, "Field combination '%s' under model '%s' must have a unique=True constraint" % (','.join([rel_field.name for rel_field in f.foreign_related_fields]), f.rel.to.__name__))
else:
if not f.foreign_related_fields[0].unique:
e.add(opts, "Field '%s' under model '%s' must have a unique=True constraint." % (f.foreign_related_fields[0].name, f.rel.to.__name__))
rel_opts = f.rel.to._meta
rel_name = f.related.get_accessor_name()
rel_query_name = f.related_query_name()
if not f.rel.is_hidden():
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
seen_intermediary_signatures = []
for i, f in enumerate(opts.local_many_to_many):
# Check to see if the related m2m field will clash with any
# existing fields, m2m fields, m2m related objects or related
# objects
if f.rel.to not in models.get_models():
# If the related model is swapped, provide a hint;
# otherwise, the model just hasn't been installed.
if not isinstance(f.rel.to, six.string_types) and f.rel.to._meta.swapped:
e.add(opts, "'%s' defines a relation with the model '%s.%s', which has been swapped out. Update the relation to point at settings.%s." % (f.name, f.rel.to._meta.app_label, f.rel.to._meta.object_name, f.rel.to._meta.swappable))
else:
e.add(opts, "'%s' has an m2m relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, six.string_types):
continue
# Check that the field is not set to unique. ManyToManyFields do not support unique.
if f.unique:
e.add(opts, "ManyToManyFields cannot be unique. Remove the unique argument on '%s'." % f.name)
if f.rel.through is not None and not isinstance(f.rel.through, six.string_types):
from_model, to_model = cls, f.rel.to
if from_model == to_model and f.rel.symmetrical and not f.rel.through._meta.auto_created:
e.add(opts, "Many-to-many fields with intermediate tables cannot be symmetrical.")
seen_from, seen_to, seen_self = False, False, 0
for inter_field in f.rel.through._meta.fields:
rel_to = getattr(inter_field.rel, 'to', None)
if from_model == to_model: # relation to self
if rel_to == from_model:
seen_self += 1
if seen_self > 2:
e.add(opts, "Intermediary model %s has more than "
"two foreign keys to %s, which is ambiguous "
"and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
if rel_to == from_model:
if seen_from:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
seen_from = True
elif rel_to == to_model:
if seen_to:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
rel_to._meta.object_name
)
)
else:
seen_to = True
if f.rel.through not in models.get_models(include_auto_created=True):
e.add(opts, "'%s' specifies an m2m relation through model "
"%s, which has not been installed." % (f.name, f.rel.through)
)
signature = (f.rel.to, cls, f.rel.through)
if signature in seen_intermediary_signatures:
e.add(opts, "The model %s has two manually-defined m2m "
"relations through the model %s, which is not "
"permitted. Please consider using an extra field on "
"your intermediary model instead." % (
cls._meta.object_name,
f.rel.through._meta.object_name
)
)
else:
seen_intermediary_signatures.append(signature)
if not f.rel.through._meta.auto_created:
seen_related_fk, seen_this_fk = False, False
for field in f.rel.through._meta.fields:
if field.rel:
if not seen_related_fk and field.rel.to == f.rel.to:
seen_related_fk = True
elif field.rel.to == cls:
seen_this_fk = True
if not seen_related_fk or not seen_this_fk:
e.add(opts, "'%s' is a manually-defined m2m relation "
"through model %s, which does not have foreign keys "
"to %s and %s" % (f.name, f.rel.through._meta.object_name,
f.rel.to._meta.object_name, cls._meta.object_name)
)
elif isinstance(f.rel.through, six.string_types):
e.add(opts, "'%s' specifies an m2m relation through model %s, "
"which has not been installed" % (f.name, f.rel.through)
)
rel_opts = f.rel.to._meta
rel_name = f.related.get_accessor_name()
rel_query_name = f.related_query_name()
# If rel_name is none, there is no reverse accessor (this only
# occurs for symmetrical m2m relations to self). If this is the
# case, there are no clashes to check for this field, as there are
# no reverse descriptors for this field.
if rel_name is not None:
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
# Check ordering attribute.
if opts.ordering:
for field_name in opts.ordering:
if field_name == '?':
continue
if field_name.startswith('-'):
field_name = field_name[1:]
if opts.order_with_respect_to and field_name == '_order':
continue
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field_name:
continue
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
if field_name == 'pk':
continue
try:
opts.get_field(field_name, many_to_many=False)
except models.FieldDoesNotExist:
e.add(opts, '"ordering" refers to "%s", a field that doesn\'t exist.' % field_name)
# Check unique_together.
for ut in opts.unique_together:
validate_local_fields(e, opts, "unique_together", ut)
if not isinstance(opts.index_together, collections.Sequence):
e.add(opts, '"index_together" must a sequence')
else:
for it in opts.index_together:
validate_local_fields(e, opts, "index_together", it)
return len(e.errors)
def validate_local_fields(e, opts, field_name, fields):
from django.db import models
if not isinstance(fields, collections.Sequence):
e.add(opts, 'all %s elements must be sequences' % field_name)
else:
for field in fields:
try:
f = opts.get_field(field, many_to_many=True)
except models.FieldDoesNotExist:
e.add(opts, '"%s" refers to %s, a field that doesn\'t exist.' % (field_name, field))
else:
if isinstance(f.rel, models.ManyToManyRel):
e.add(opts, '"%s" refers to %s. ManyToManyFields are not supported in %s.' % (field_name, f.name, field_name))
if f not in opts.local_fields:
e.add(opts, '"%s" refers to %s. This is not in the same model as the %s statement.' % (field_name, f.name, field_name))
|
bsd-3-clause
| -6,676,675,528,759,319,000
| 62.655738
| 264
| 0.533565
| false
| 4.156646
| false
| false
| false
|
tqchen/tvm
|
python/tvm/auto_scheduler/auto_schedule.py
|
1
|
7844
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
User interface for TVM Auto-scheduler.
The basic schedule search process for TVM Auto-scheduler is designed to be:
`Program sampling` -> `Performance Tuning`.
In `Program sampling`, we use some predefined precise or heuristic rules to generate several
initial schedules. Based on these initial starting points, we perform `Performance Tuning` which
uses cost model based evolutionary search to select schedules with the best performance.
Candidate schedules are measured against the specific hardware target.
"""
import tvm._ffi
from tvm.runtime import Object
from .measure import LocalBuilder, LocalRunner
from .workload_registry import make_workload_key
from .compute_dag import ComputeDAG
from .cost_model import XGBModel
from .search_policy import SketchPolicy
from . import _ffi_api
@tvm._ffi.register_object("auto_scheduler.HardwareParams")
class HardwareParams(Object):
"""The parameters of target hardware used to guide the search policy
TODO(jcf94): This is considered to be merged with the new Target specification:
https://discuss.tvm.ai/t/rfc-tvm-target-specification/6844
Parameters
----------
num_cores : int
The number of device cores.
vector_unit_bytes : int
The width of vector units in bytes.
cache_line_bytes : int
The size of cache line in bytes.
"""
def __init__(self, num_cores, vector_unit_bytes, cache_line_bytes):
self.__init_handle_by_constructor__(
_ffi_api.HardwareParams, num_cores, vector_unit_bytes, cache_line_bytes
)
@tvm._ffi.register_object("auto_scheduler.SearchTask")
class SearchTask(Object):
"""The computation information and hardware parameters for a schedule search task.
Parameters
----------
dag : ComputeDAG
The ComputeDAG for the corresponding compute declaration.
workload_key : str
The workload key for the corresponding compute declaration.
target : tvm.target.Target
The target device of this search task.
target_host : Optional[tvm.target.Target]
The target host device of this search task.
hardware_params : Optional[HardwareParams]
Hardware parameters used in this search task.
"""
def __init__(self, dag, workload_key, target, target_host=None, hardware_params=None):
self.__init_handle_by_constructor__(
_ffi_api.SearchTask, dag, workload_key, target, target_host, hardware_params
)
@tvm._ffi.register_object("auto_scheduler.TuningOptions")
class TuningOptions(Object):
"""This controls the options of performance tuning.
Parameters
----------
num_measure_trials: int = 0
The number of measurement trials.
The search policy measures `num_measure_trials` schedules in total and returns the best one
among them.
With `num_measure_trials` == 0, the policy will do the schedule search but won't involve
measurement. This can be used to get a runnable schedule quickly without auto-tuning.
early_stopping: Optional[int]
Stop the tuning early if getting no improvement after n measurements.
num_measures_per_round: int = 64
The number of schedules to be measured at each search round.
The whole schedule search process will try a total number of `num_measure_trials` in several
rounds.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during schedule search.
builder: Union[ProgramBuilder, str] = 'local'
ProgramBuilder which builds the program.
runner: Union[ProgramRunner, str] = 'local'
ProgramRunner which runs the program and measures time costs.
measure_callbacks: Optional[List[MeasureCallback]]
Callback functions called after each measurement.
Candidates:
- auto_scheduler.RecordToFile
"""
def __init__(
self,
num_measure_trials=0,
early_stopping=None,
num_measures_per_round=64,
verbose=1,
builder="local",
runner="local",
measure_callbacks=None,
):
if isinstance(builder, str):
if builder == "local":
builder = LocalBuilder()
else:
raise ValueError("Invalid builder: " + builder)
elif not isinstance(builder, tvm.auto_scheduler.measure.ProgramBuilder):
raise ValueError(
"Invalid builder: "
+ builder
+ " . TuningOptions expects a ProgramBuilder or string."
)
if isinstance(runner, str):
if runner == "local":
runner = LocalRunner()
else:
raise ValueError("Invalid runner: " + runner)
elif not isinstance(runner, tvm.auto_scheduler.measure.ProgramRunner):
raise ValueError(
"Invalid runner: " + runner + " . TuningOptions expects a ProgramRunner or string."
)
self.__init_handle_by_constructor__(
_ffi_api.TuningOptions,
num_measure_trials,
early_stopping or -1,
num_measures_per_round,
verbose,
builder,
runner,
measure_callbacks,
)
def create_task(func, args, target, target_host=None, hardware_params=None):
"""Create a search task
Parameters
----------
func : Union[Function, str]
The function that returns the compute declaration Tensors.
        Can be a function or the function name.
args : Union[Tuple[Any, ...], List[Any]]
The args of the function.
target : tvm.target.Target
The target device of this search task.
target_host : Optional[tvm.target.Target]
The target host device of this search task.
hardware_params : Optional[HardwareParams]
Hardware parameters used in this search task.
Returns
-------
SearchTask: the created task
"""
workload_key = make_workload_key(func, args)
dag = ComputeDAG(workload_key)
return SearchTask(dag, workload_key, target, target_host, hardware_params)
def auto_schedule(task, search_policy=None, tuning_options=TuningOptions()):
"""Run auto scheduling search for a task
Parameters
----------
task : SearchTask
The SearchTask for the computation declaration.
search_policy : Optional[SearchPolicy]
The search policy to be used for schedule search.
tuning_options : Optional[TuningOptions]
Tuning and measurement options.
Returns
-------
    A `te.schedule` and a list of `te.Tensor` to be used in `tvm.lower` or `tvm.build`.
"""
if not isinstance(task, SearchTask):
raise ValueError(
"Invalid task: " + task + " . `auto_scheduler.auto_schedule` expects a SearchTask."
)
if search_policy is None:
cost_model = XGBModel()
search_policy = SketchPolicy(task, cost_model)
sch, tensors = _ffi_api.AutoSchedule(search_policy, tuning_options)
return sch, tensors
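# A minimal end-to-end sketch of the API defined above. It assumes tvm is
# built with LLVM support and uses the register_workload helper from
# tvm.auto_scheduler; the matmul workload, its sizes, the trial count and the
# log file name are arbitrary illustrative choices, not fixed by this module.
def _example_usage():
    import tvm
    from tvm import te, auto_scheduler

    @auto_scheduler.register_workload
    def example_matmul(N, M, K):
        # A simple C = A x B compute declaration for the search to optimize.
        A = te.placeholder((N, K), name="A")
        B = te.placeholder((K, M), name="B")
        k = te.reduce_axis((0, K), name="k")
        C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C")
        return [A, B, C]

    task = create_task(example_matmul, (128, 128, 128), tvm.target.Target("llvm"))
    options = TuningOptions(
        num_measure_trials=64,
        measure_callbacks=[auto_scheduler.RecordToFile("matmul_tuning.json")],
    )
    # Returns a te.Schedule plus the list of te.Tensors to pass to tvm.build.
    return auto_schedule(task, tuning_options=options)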
|
apache-2.0
| 6,643,239,991,650,872,000
| 35.654206
| 100
| 0.666624
| false
| 4.30989
| false
| false
| false
|
jpwarren/holideck
|
examples/soundlevel.py
|
1
|
1766
|
#!/usr/bin/python
"""
A sound level meter for the MooresCloud Holiday.
Requires PyAudio.
Copyright (c) 2013, Josh Deprez
License: MIT (see LICENSE for details)
"""
__author__ = 'Josh Deprez'
__version__ = '0.01-dev'
__license__ = 'MIT'
import pyaudio
import audioop
import struct
import math
import holiday
import sys
import time
def render(hol, value):
for i in xrange(value):
alpha = i / 50.0
beta = 1.0 - alpha
hol.setglobe(i, alpha * 0xFF, beta * 0xFF, 0x00) # Green -> Red
# Black remaining lights
for i in xrange(value,50):
hol.setglobe(i, 0x00, 0x00, 0x00)
hol.render()
return
if __name__ == '__main__':
if len(sys.argv) > 1:
the_hostname = sys.argv[1]
print the_hostname
else:
# Assume the holiday is the simulator
the_hostname = 'localhost:8080'
hol = holiday.Holiday(remote=True,addr=the_hostname)
render(hol, 0)
# Do PyAudio stuff
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
INPUT_BLOCK_TIME = 0.02
BUFFER = 1024 #Seems to work...
# How do we select the appropriate input device?
#input_device = 0 #Built-in Microphone (seems good for OSX)
#input_device = 3 # this seems to be correct for juno
input_device = 15 # Ubuntu default
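# A hedged sketch for picking the index above: PyAudio can enumerate the
# available capture devices, e.g.
#
#     probe = pyaudio.PyAudio()
#     for idx in xrange(probe.get_device_count()):
#         print "%d: %s" % (idx, probe.get_device_info_by_index(idx)['name'])
#     probe.terminate()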
pa = pyaudio.PyAudio()
stream = pa.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
#input_device_index = input_device,
frames_per_buffer = BUFFER)
SCALE = 300 # Probably need to tweak this
MAX_LIGHT = 50
errorcount = 0
print "Press Ctrl-C to quit"
while True:
try:
block = stream.read(BUFFER)
except IOError, e:
errorcount += 1
print( "(%d) Error recording: %s"%(errorcount,e))
amplitude = audioop.rms(block, 2)
#print amplitude
render(hol, min(amplitude / SCALE, MAX_LIGHT))
|
mit
| 655,493,103,852,588,200
| 20.277108
| 65
| 0.665345
| false
| 2.834671
| false
| false
| false
|
beni55/Sublime-BracketGuard
|
BracketGuard.py
|
1
|
2769
|
import sublime, sublime_plugin
import re
from collections import namedtuple
BracketPosition = namedtuple("BracketPosition", "position opener")
BracketResult = namedtuple("BracketResult", "success start end")
# House keeping for the async beast
activeChecks = 0
dismissedChecks = 0
# scopeNames is used to avoid a weird memory leak with Sublime Text which occurs
# when calling view.scope_name within an async routine
scopeNames = []
class SelectionListener(sublime_plugin.EventListener):
def on_modified(self, view):
global scopeNames
scopeNames = [view.scope_name(i) for i in range(len(self.getBufferContent(view)))]
if view.settings().get("is_test", False):
self.on_modified_async(view)
def on_modified_async(self, view):
global activeChecks, dismissedChecks
if activeChecks > 0:
dismissedChecks += 1
return
contentRegion = sublime.Region(0, view.size())
bufferContent = self.getBufferContent(view)
activeChecks += 1
bracketResult = getFirstBracketError(bufferContent, view)
if dismissedChecks > 0:
dismissedChecks = 0
bracketResult = getFirstBracketError(bufferContent, view)
activeChecks -= 1
if bracketResult.success:
view.erase_regions("BracketGuardRegions")
else:
openerRegion = sublime.Region(bracketResult.start, bracketResult.start + 1)
closerRegion = sublime.Region(bracketResult.end, bracketResult.end + 1)
view.add_regions("BracketGuardRegions", [openerRegion, closerRegion], "invalid")
def getBufferContent(self, view):
contentRegion = sublime.Region(0, view.size())
return view.substr(contentRegion)
def getFirstBracketError(codeStr, view):
global scopeNames
opener = list("({[")
closer = list(")}]")
matchingStack = []
for index, char in enumerate(codeStr):
if dismissedChecks > 0:
# we will have to start over
return BracketResult(True, -1, -1)
scopeName = scopeNames[index]
if "string" in scopeName or "comment" in scopeName:
# ignore unmatched brackets in strings and comments
continue
if char in opener:
matchingStack.append(BracketPosition(index, char))
elif char in closer:
matchingOpener = opener[closer.index(char)]
if len(matchingStack) == 0:
return BracketResult(False, -1, index)
poppedOpener = matchingStack.pop()
if matchingOpener != poppedOpener.opener:
return BracketResult(False, poppedOpener.position, index)
if len(matchingStack) == 0:
return BracketResult(True, -1, -1)
else:
poppedOpener = matchingStack.pop()
return BracketResult(False, poppedOpener.position, -1)
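# Worked example of the matcher above (a sketch; it assumes scopeNames is
# populated for the buffer and no character sits in a string or comment
# scope): for the buffer "foo(bar]" the '(' at index 3 is pushed, the ']' at
# index 7 pops it, '(' does not match '[', so BracketResult(False, 3, 7) is
# returned and both positions are highlighted as invalid regions.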
|
mit
| -2,855,513,871,807,219,000
| 25.969697
| 86
| 0.681835
| false
| 3.9
| false
| false
| false
|
kotoroshinoto/TCGA_MAF_Analysis
|
gooch_maf_tools/commands/analysis/get_stats/mutcount_length_linreg.py
|
1
|
4685
|
import rpy2.robjects.packages as rpackages
import rpy2.robjects as ro
import click
import csv
import sys
from typing import List
from typing import Dict
from ..get_stats.util import *
class GeneLinregEntry:
def __init__(self, symbol: str):
self.symbol = symbol
self.symbol_key = symbol.casefold()
self.count = 0
self.length = 0
class GeneLinregData:
def __init__(self):
self.data_dict = dict() # type: Dict[str, GeneLinregEntry]
self.symbol_list = list() #type: List[str]
def read_count_file(self, filehandle, name_col: int, count_col: int, has_header: bool):
reader = csv.reader(filehandle, dialect='excel-tab')
if has_header:
next(reader) # skip first line
for row in reader:
symbol = row[name_col]
symbol_key = symbol.casefold()
if symbol_key not in self.data_dict:
entry = GeneLinregEntry(symbol) # type: GeneLinregEntry
self.data_dict[symbol_key] = entry
self.symbol_list.append(symbol_key)
else:
entry = self.data_dict[symbol_key] # type: GeneLinregEntry
entry.count = int(row[count_col])
def read_length_file(self, filehandle, name_col: int, length_col: int, has_header: bool):
reader = csv.reader(filehandle, dialect='excel-tab')
if has_header:
next(reader) # skip first line
for row in reader:
symbol = row[name_col]
symbol_key = symbol.casefold()
if (symbol_key not in self.symbol_list) or (symbol_key not in self.data_dict):
continue
entry = self.data_dict[symbol_key]
entry.length = int(row[length_col])
def generate_count_vector(self) -> ro.IntVector:
counts = list()
for symbol in self.symbol_list:
counts.append(self.data_dict[symbol].count)
return ro.IntVector(counts)
def generate_length_vector(self) -> ro.IntVector:
lengths = list()
for symbol in self.symbol_list:
lengths.append(self.data_dict[symbol].length)
return ro.IntVector(lengths)
def get_symbol_list(self):
return self.symbol_list
@click.command(name='Gene_Outliers', help="compute studentized residuals for list of gene counts")
@click.option('--count_file', type=(click.File('r'), int, int), default=(None, None, None), required=True, help="count file, symbol column, count column")
@click.option('--length_file', type=(click.File('r'), int, int), default=(None, None, None), required=True, help="length file, symbol column, length column")
@click.option('--header_count/--noheader_count', default=True)
@click.option('--header_length/--noheader_length', default=True)
@click.option('--header_name_map/--noheader_name_map', default=True)
@click.option('--output', required=False, default=None, type=click.Path(dir_okay=False, writable=True), help="output file path")
@click.pass_context
def cli(ctx, count_file, length_file, output, header_count, header_length, header_name_map):
#TODO find out why some lengths are not matching and are being given a size of zero
errormsg=list()
if count_file[0] is None:
errormsg.append("--count_file is required")
if length_file[0] is None:
errormsg.append("--length_file is required")
# if name_map_file[0] is None:
# errormsg.append("--name_map_file is required")
if len(errormsg) > 0:
print(cli.get_help(ctx))
raise click.UsageError(', '.join(errormsg))
check_and_install_R_dependency('MASS')
rpackages.importr('MASS')
linreg_data = GeneLinregData()
#read in counts file
linreg_data.read_count_file(count_file[0], count_file[1], count_file[2], header_count)
#read in length file
linreg_data.read_length_file(length_file[0], length_file[1], length_file[2], header_length)
length_vector = linreg_data.generate_length_vector()
count_vctr = linreg_data.generate_count_vector()
ro.r('x=' + str(length_vector.r_repr()))
ro.r('y=' + str(count_vctr.r_repr()))
linreg_result = ro.r('lm(y~x)')
studres_func = ro.r('studres')
studres_result = studres_func(linreg_result)
if output is None:
output_file = sys.stdout
else:
output_file = open(output, newline='', mode='w')
fieldnames = list()
fieldnames.append('Gene_Symbol')
fieldnames.append('Length')
fieldnames.append('Mutation_Count')
fieldnames.append('Studentized_Residual')
output_writer = csv.writer(output_file, dialect='excel-tab')
output_writer.writerow(fieldnames)
symbol_list = linreg_data.symbol_list
for i in range(0, len(symbol_list)):
symbol = symbol_list[i]
if (symbol not in linreg_data.symbol_list) or (symbol not in linreg_data.data_dict):
continue
dataentry = linreg_data.data_dict[symbol] # type: GeneLinregEntry
row = list()
row.append(dataentry.symbol)
row.append(dataentry.length)
row.append(dataentry.count)
row.append(studres_result[i])
output_writer.writerow(row)
if __name__ == "__main__":
cli()
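# A rough invocation sketch (file names and column indices are placeholders;
# each option takes a path followed by the symbol column and the value column,
# matching the click options defined above):
#
#   <maf-tools entry point> Gene_Outliers \
#       --count_file gene_counts.tsv 0 1 \
#       --length_file gene_lengths.tsv 0 1 \
#       --output gene_residuals.tsv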
|
unlicense
| -2,991,018,952,282,382,000
| 33.962687
| 158
| 0.710566
| false
| 2.909938
| false
| false
| false
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/packet_capture_result.py
|
1
|
3902
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PacketCaptureResult(Model):
"""Information about packet capture session.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: Name of the packet capture session.
:vartype name: str
:ivar id: ID of the packet capture operation.
:vartype id: str
:param etag: Default value: "A unique read-only string that changes
whenever the resource is updated." .
:type etag: str
:param target: Required. The ID of the targeted resource, only VM is
currently supported.
:type target: str
:param bytes_to_capture_per_packet: Number of bytes captured per packet,
the remaining bytes are truncated. Default value: 0 .
:type bytes_to_capture_per_packet: int
:param total_bytes_per_session: Maximum size of the capture output.
Default value: 1073741824 .
:type total_bytes_per_session: int
:param time_limit_in_seconds: Maximum duration of the capture session in
seconds. Default value: 18000 .
:type time_limit_in_seconds: int
:param storage_location: Required.
:type storage_location:
~azure.mgmt.network.v2017_06_01.models.PacketCaptureStorageLocation
:param filters:
:type filters:
list[~azure.mgmt.network.v2017_06_01.models.PacketCaptureFilter]
:param provisioning_state: The provisioning state of the packet capture
session. Possible values include: 'Succeeded', 'Updating', 'Deleting',
'Failed'
:type provisioning_state: str or
~azure.mgmt.network.v2017_06_01.models.ProvisioningState
"""
_validation = {
'name': {'readonly': True},
'id': {'readonly': True},
'target': {'required': True},
'storage_location': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'target': {'key': 'properties.target', 'type': 'str'},
'bytes_to_capture_per_packet': {'key': 'properties.bytesToCapturePerPacket', 'type': 'int'},
'total_bytes_per_session': {'key': 'properties.totalBytesPerSession', 'type': 'int'},
'time_limit_in_seconds': {'key': 'properties.timeLimitInSeconds', 'type': 'int'},
'storage_location': {'key': 'properties.storageLocation', 'type': 'PacketCaptureStorageLocation'},
'filters': {'key': 'properties.filters', 'type': '[PacketCaptureFilter]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, **kwargs):
super(PacketCaptureResult, self).__init__(**kwargs)
self.name = None
self.id = None
self.etag = kwargs.get('etag', "A unique read-only string that changes whenever the resource is updated.")
self.target = kwargs.get('target', None)
self.bytes_to_capture_per_packet = kwargs.get('bytes_to_capture_per_packet', 0)
self.total_bytes_per_session = kwargs.get('total_bytes_per_session', 1073741824)
self.time_limit_in_seconds = kwargs.get('time_limit_in_seconds', 18000)
self.storage_location = kwargs.get('storage_location', None)
self.filters = kwargs.get('filters', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
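# A minimal construction sketch (resource IDs are placeholders; the storage
# location must be a PacketCaptureStorageLocation from this models package):
#
#     capture = PacketCaptureResult(
#         target="/subscriptions/.../virtualMachines/example-vm",
#         storage_location=storage_location,
#     )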
|
mit
| 1,933,286,419,347,813,400
| 44.372093
| 114
| 0.639672
| false
| 4.026832
| false
| false
| false
|
teng-lin/teng-lin.github.io
|
scripts/scrape_linkedin.py
|
1
|
4038
|
import sys
import subprocess
import json
import re
import pandas as pd
from pandas import ExcelWriter
_id_offset_pattern = re.compile(u'id=(\d+)&offset=(\d+)')
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.iteritems():
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def _parse_json(fileobj):
data = json.load(fileobj, object_hook=_decode_dict)
content = data['content']
connections = []
if 'connections' in content:
if 'connections' in content['connections']:
connections = content['connections']['connections']
return connections
def get_id_offset(url):
"""
parse id and offset from the url
:param url:
:return: id and offset as two element tuple
"""
id = None
offset = None
match = re.search(_id_offset_pattern, url)
if match:
id = match.group(1)
offset = match.group(2)
return id, offset
def set_id_offset(url, id, offset):
"""
change id and offset in url
"""
new_url = re.sub(_id_offset_pattern, u'id=%s&offset=%s' % (id, offset), url)
return new_url
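# Example of the two helpers above (the id/offset values are arbitrary):
#
#     url = 'https://www.linkedin.com/profile/profile-v2-connections?id=14271099&offset=90&count=10'
#     get_id_offset(url)                      # -> ('14271099', '90')
#     set_id_offset(url, '14271099', '100')   # -> same URL with offset=100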
def retrive_connection(cmd, id, offset):
"""
    Retrieve connections for a specific LinkedIn id, starting at the given offset
    :param cmd: curl command as a list of strings
:param id: Linkedin id
:param offset:
:return: a list of connections
Below is an example of the curl command copied from Chrome's developer tool
curl_command = [
"curl",
'https://www.linkedin.com/profile/profile-v2-connections?id=14271099&offset=90&count=10&distance=1&type=INITIAL&_=1434080930325' ,
"-H",
'Cookie: bcookie="v=2&6789ccca-a705-4829-8306-6555c44011e5"; visit="v=1&M"; __qca=P0-341338068-1407868716823; VID=V_2014_10_31_02_1849;
"-H",
'DNT: 1',
"-H",
'Accept-Encoding: gzip, deflate, sdch',
"-H",
'Accept-Language: en-US,en;q=0.8',
"-H",
'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36',
"-H",
'Accept: text/plain, */*; q=0.01',
"-H",
'Referer:https://www.linkedin.com/profile/view?id=14271099&authType=NAME_SEARCH&authToken=ig21&locale=en_US&srchid=142710991434080925044
"-H",
'X-Requested-With: XMLHttpRequest' ,
"-H",
'Connection: keep-alive',
"--compressed"
]
"""
command = cmd[:]
# modify url
command[1] = set_id_offset(command[1], id, offset)
print command[1]
# run curl command and redirect response json to stdout
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
proc.wait()
return _parse_json(proc.stdout)
if __name__ == '__main__':
bashCommand = sys.argv[1:]
url = bashCommand[1]
uid, offset = get_id_offset(url)
all_connections = []
offset = 0
count = 10 # the number of connections is hard-coded to 10
# call "unofficial" Linkedin API to retrieve all second degree connection of specific user
while True:
connections = retrive_connection(bashCommand, uid, offset)
if len(connections) == 0:
break
all_connections.extend(connections)
offset += count
print "total number of connections: %d" % len(all_connections)
excel = '%s.xlsx' % uid
print "writing %s" % excel
# Save all connections to excel spreadsheet
df = pd.DataFrame(all_connections)
writer = ExcelWriter(excel)
df.to_excel(writer, 'Connection', index=False)
writer.save()
|
mit
| 2,318,859,692,451,768,300
| 25.220779
| 140
| 0.620109
| false
| 3.393277
| false
| false
| false
|
gutouyu/cs231n
|
cs231n/assignment/assignment2/cs231n/classifiers/cnn.py
|
1
|
6283
|
from builtins import object
import numpy as np
from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.layer_utils import *
class ThreeLayerConvNet(object):
"""
A three-layer convolutional network with the following architecture:
conv - relu - 2x2 max pool - affine - relu - affine - softmax
The network operates on minibatches of data that have shape (N, C, H, W)
consisting of N images, each with height H and width W and with C input
channels.
"""
def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,
hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,
dtype=np.float32):
"""
Initialize a new network.
Inputs:
- input_dim: Tuple (C, H, W) giving size of input data
- num_filters: Number of filters to use in the convolutional layer
- filter_size: Size of filters to use in the convolutional layer
- hidden_dim: Number of units to use in the fully-connected hidden layer
- num_classes: Number of scores to produce from the final affine layer.
- weight_scale: Scalar giving standard deviation for random initialization
of weights.
- reg: Scalar giving L2 regularization strength
- dtype: numpy datatype to use for computation.
"""
self.params = {}
self.reg = reg
self.dtype = dtype
############################################################################
# TODO: Initialize weights and biases for the three-layer convolutional #
# network. Weights should be initialized from a Gaussian with standard #
# deviation equal to weight_scale; biases should be initialized to zero. #
# All weights and biases should be stored in the dictionary self.params. #
# Store weights and biases for the convolutional layer using the keys 'W1' #
# and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #
# hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #
# of the output affine layer. #
############################################################################
#pass
C,H,W = input_dim
self.params['W1'] = np.random.randn(num_filters, C, filter_size, filter_size) * weight_scale
self.params['b1'] = np.zeros(num_filters)
        self.params['W2'] = np.random.randn((H // 2) * (W // 2) * num_filters, hidden_dim) * weight_scale  # // keeps the flattened dim an integer after 2x2 pooling
self.params['b2'] = np.zeros(hidden_dim)
self.params['W3'] = np.random.randn(hidden_dim, num_classes) * weight_scale
self.params['b3'] = np.zeros(num_classes)
############################################################################
# END OF YOUR CODE #
############################################################################
for k, v in self.params.items():
self.params[k] = v.astype(dtype)
def loss(self, X, y=None):
"""
Evaluate loss and gradient for the three-layer convolutional network.
Input / output: Same API as TwoLayerNet in fc_net.py.
"""
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
W3, b3 = self.params['W3'], self.params['b3']
# pass conv_param to the forward pass for the convolutional layer
filter_size = W1.shape[2]
conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}
# pass pool_param to the forward pass for the max-pooling layer
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
scores = None
############################################################################
# TODO: Implement the forward pass for the three-layer convolutional net, #
# computing the class scores for X and storing them in the scores #
# variable. #
############################################################################
#pass
conv_relu_pool_out, cache_conv = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)
hidden_out, cache_hidden = affine_relu_forward(conv_relu_pool_out, W2, b2)
scores, cache_scores = affine_forward(hidden_out, W3, b3)
############################################################################
# END OF YOUR CODE #
############################################################################
if y is None:
return scores
loss, grads = 0, {}
############################################################################
# TODO: Implement the backward pass for the three-layer convolutional net, #
# storing the loss and gradients in the loss and grads variables. Compute #
# data loss using softmax, and make sure that grads[k] holds the gradients #
# for self.params[k]. Don't forget to add L2 regularization! #
############################################################################
#pass
# conv - relu - 2x2 max pool - affine - relu - affine - softmax
loss, dscores = softmax_loss(scores, y)
loss += 0.5 * self.reg * (np.sum(W1*W1) + np.sum(W2*W2) + np.sum(W3*W3))
dhidden_out, dW3, db3 = affine_backward(dscores, cache_scores)
dconv_relu_pool_out, dW2, db2 = affine_relu_backward(dhidden_out, cache_hidden)
dX, dW1, db1 = conv_relu_pool_backward(dconv_relu_pool_out, cache_conv)
dW1 += self.reg * W1
dW2 += self.reg * W2
dW3 += self.reg * W3
grads['W1'], grads['b1'] = dW1, db1
grads['W2'], grads['b2'] = dW2, db2
grads['W3'], grads['b3'] = dW3, db3
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
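# A small usage sketch (batch size and hyperparameters are arbitrary):
#
#     model = ThreeLayerConvNet(num_filters=3, filter_size=3,
#                               hidden_dim=7, weight_scale=1e-2)
#     X = np.random.randn(2, 3, 32, 32)    # two CIFAR-sized images
#     y = np.random.randint(10, size=2)
#     loss, grads = model.loss(X, y)        # training mode: loss and gradients
#     scores = model.loss(X)                # no labels: (2, 10) class scores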
|
mit
| 6,995,892,720,637,449,000
| 48.865079
| 100
| 0.482572
| false
| 4.354123
| false
| false
| false
|
rwblair/cogat
|
cognitive/apps/atlas/test_urls.py
|
1
|
4234
|
from django.core.urlresolvers import resolve
from django.test import TestCase
class AtlasUrlTestCase(TestCase):
def test_all_concepts(self):
found = resolve('/concepts')
self.assertEqual(found.view_name, 'all_concepts')
def test_all_tasks(self):
found = resolve('/tasks')
self.assertEqual(found.view_name, 'all_tasks')
def test_all_batteries(self):
found = resolve('/batteries')
self.assertEqual(found.view_name, 'all_batteries')
def test_all_theories(self):
found = resolve('/theories')
self.assertEqual(found.view_name, 'all_theories')
def test_all_disorders(self):
found = resolve('/disorders')
self.assertEqual(found.view_name, 'all_disorders')
def test_concepts_by_letter(self):
found = resolve('/concepts/a/')
self.assertEqual(found.view_name, 'concepts_by_letter')
def test_tasks_by_letter(self):
found = resolve('/tasks/a/')
self.assertEqual(found.view_name, 'tasks_by_letter')
def test_view_concept(self):
found = resolve('/concept/id/fake_123/')
self.assertEqual(found.view_name, 'concept')
def test_view_task(self):
found = resolve('/task/id/fake_123/')
self.assertEqual(found.view_name, 'task')
def test_view_battery(self):
found = resolve('/battery/id/fake_123/')
self.assertEqual(found.view_name, 'battery')
def test_view_theory(self):
found = resolve('/theory/id/fake_123/')
self.assertEqual(found.view_name, 'theory')
def test_view_disorder(self):
found = resolve('/disorder/id/fake_123/')
self.assertEqual(found.view_name, 'disorder')
def test_contribute_term(self):
found = resolve('/terms/new/')
self.assertEqual(found.view_name, 'contribute_term')
def test_add_term(self):
found = resolve('/terms/add/')
self.assertEqual(found.view_name, 'add_term')
def test_update_concept(self):
found = resolve('/concept/update/fake_123/')
self.assertEqual(found.view_name, 'update_concept')
def test_update_task(self):
found = resolve('/task/update/fake_123/')
self.assertEqual(found.view_name, 'update_task')
def test_update_theory(self):
found = resolve('/theory/update/fake_123/')
self.assertEqual(found.view_name, 'update_theory')
def test_update_disorder(self):
found = resolve('/disorder/update/fake_123/')
self.assertEqual(found.view_name, 'update_disorder')
def test_add_concept_relation(self):
found = resolve('/concept/assert/fake_123/')
self.assertEqual(found.view_name, 'add_concept_relation')
def test_add_task_contrast(self):
found = resolve('/task/add/contrast/fake_123/')
self.assertEqual(found.view_name, 'add_task_contrast')
def test_add_task_concept(self):
found = resolve('/task/add/concept/fake_123/')
self.assertEqual(found.view_name, 'add_task_concept')
def test_add_concept_contrast(self):
found = resolve('/concept/add/contrast/fake_123/')
self.assertEqual(found.view_name, 'add_concept_contrast_task')
def test_add_contrast(self):
found = resolve('/contrast/add/fake_123/')
self.assertEqual(found.view_name, 'add_contrast')
def test_add_condition(self):
found = resolve('/condition/add/fake_123/')
self.assertEqual(found.view_name, 'add_condition')
def test_search_all(self):
found = resolve('/search')
self.assertEqual(found.view_name, 'search')
def test_search_concept(self):
found = resolve('/concepts/search')
self.assertEqual(found.view_name, 'search_concept')
'''
# Graph views
url(r'^graph/task/(?P<uid>[\w\+%_& ]+)/$', graph.task_graph, name="task_graph"),
url(r'^graph/concept/(?P<uid>[\w\+%_& ]+)/$', graph.concept_graph, name="concept_graph"),
url(r'^graph/$', graph.explore_graph, name="explore_graph"),
url(r'^graph/task/(?P<uid>[\w\+%_& ]+)/gist$', graph.task_gist, name="task_gist"),
url(r'^graph/task/(?P<uid>[\w\+%_& ]+)/gist/download$', graph.download_task_gist, name="download_task_gist"),
'''
|
mit
| -143,582,583,258,756,080
| 35.188034
| 113
| 0.630846
| false
| 3.442276
| true
| false
| false
|
twosigma/Cook
|
cli/cook/subcommands/usage.py
|
1
|
12216
|
import json
import sys
from tabulate import tabulate
from cook import http, terminal
from cook.format import format_job_memory, format_memory_amount
from cook.querying import query_across_clusters, make_job_request
from cook.util import guard_no_cluster, current_user, print_info, print_error
def get_job_data(cluster, usage_map):
"""Gets data for jobs in usage map if it has any"""
ungrouped_running_job_uuids = usage_map['ungrouped']['running_jobs']
job_uuids_to_retrieve = ungrouped_running_job_uuids[:]
grouped = usage_map['grouped']
group_uuid_to_name = {}
for group_usage in grouped:
group = group_usage['group']
job_uuids_to_retrieve.extend(group['running_jobs'])
group_uuid_to_name[group['uuid']] = group['name']
applications = {}
num_running_jobs = len(job_uuids_to_retrieve)
if num_running_jobs > 0:
jobs = http.make_data_request(cluster, lambda: make_job_request(cluster, job_uuids_to_retrieve))
for job in jobs:
application = job['application']['name'] if 'application' in job else None
if 'groups' in job:
group_uuids = job['groups']
group = f'{group_uuid_to_name[group_uuids[0]]} ({group_uuids[0]})' if group_uuids else None
else:
group = None
if application not in applications:
applications[application] = {'usage': {'cpus': 0, 'mem': 0, 'gpus': 0}, 'groups': {}}
applications[application]['usage']['cpus'] += job['cpus']
applications[application]['usage']['mem'] += job['mem']
applications[application]['usage']['gpus'] += job['gpus']
if group not in applications[application]['groups']:
applications[application]['groups'][group] = {'usage': {'cpus': 0, 'mem': 0, 'gpus': 0}, 'jobs': []}
applications[application]['groups'][group]['usage']['cpus'] += job['cpus']
applications[application]['groups'][group]['usage']['mem'] += job['mem']
applications[application]['groups'][group]['usage']['gpus'] += job['gpus']
applications[application]['groups'][group]['jobs'].append(job['uuid'])
return {'count': num_running_jobs,
'applications': applications}
def get_usage_on_cluster(cluster, user):
"""Queries cluster for usage information for the given user"""
params = {'user': user, 'group_breakdown': 'true'}
usage_map = http.make_data_request(cluster, lambda: http.get(cluster, 'usage', params=params))
if not usage_map:
print_error(f'Unable to retrieve usage information on {cluster["name"]} ({cluster["url"]}).')
return {'count': 0}
using_pools = 'pools' in usage_map
pool_names = usage_map['pools'].keys() if using_pools else []
share_map = http.make_data_request(cluster, lambda: http.get(cluster, 'share', params={'user': user}))
if not share_map:
print_error(f'Unable to retrieve share information on {cluster["name"]} ({cluster["url"]}).')
return {'count': 0}
if using_pools != ('pools' in share_map):
print_error(f'Share information on {cluster["name"]} ({cluster["url"]}) is invalid. '
f'Usage information is{"" if using_pools else " not"} per pool, but share '
f'is{"" if not using_pools else " not"}')
return {'count': 0}
if pool_names != (share_map['pools'].keys() if using_pools else []):
print_error(f'Share information on {cluster["name"]} ({cluster["url"]}) is invalid. '
f'Usage information has pools: {pool_names}, but share '
f'has pools: {share_map["pools"].keys()}')
return {'count': 0}
quota_map = http.make_data_request(cluster, lambda: http.get(cluster, 'quota', params={'user': user}))
if not quota_map:
print_error(f'Unable to retrieve quota information on {cluster["name"]} ({cluster["url"]}).')
return {'count': 0}
if using_pools != ('pools' in quota_map):
print_error(f'Quota information on {cluster["name"]} ({cluster["url"]}) is invalid. '
f'Usage information is{"" if using_pools else " not"} per pool, but quota '
f'is{"" if not using_pools else " not"}')
return {'count': 0}
if pool_names != (quota_map['pools'].keys() if using_pools else []):
print_error(f'Quota information on {cluster["name"]} ({cluster["url"]}) is invalid. '
f'Usage information has pools: {pool_names}, but quota '
f'has pools: {quota_map["pools"].keys()}')
return {'count': 0}
def make_query_result(using_pools, usage_map, share_map, quota_map, pool_data=None):
query_result = {'using_pools': using_pools,
'usage': usage_map['total_usage'],
'share': share_map,
'quota': quota_map}
query_result.update(get_job_data(cluster, usage_map))
if pool_data:
query_result.update(pool_data)
return query_result
if using_pools:
pools = http.make_data_request(cluster, lambda: http.get(cluster, 'pools', params={}))
pools_dict = {pool['name']: pool for pool in pools}
for pool_name in pool_names:
if pool_name not in pools_dict or 'state' not in pools_dict[pool_name]:
print_error(f'Pool information on {cluster["name"]} ({cluster["url"]}) is invalid. '
f'Can\'t determine the state of pool {pool_name}')
return {'count': 0}
query_result = {'using_pools': using_pools,
'pools': {pool_name: make_query_result(using_pools,
usage_map['pools'][pool_name],
share_map['pools'][pool_name],
quota_map['pools'][pool_name],
{'state': pools_dict[pool_name]['state']})
for pool_name in pool_names}}
return query_result
else:
return make_query_result(using_pools, usage_map, share_map, quota_map)
def query(clusters, user):
"""
Uses query_across_clusters to make the /usage
requests in parallel across the given clusters
"""
def submit(cluster, executor):
return executor.submit(get_usage_on_cluster, cluster, user)
return query_across_clusters(clusters, submit)
def print_as_json(query_result):
"""Prints the query result as raw JSON"""
print(json.dumps(query_result))
def format_cpus(n):
"""Formats n as a number of CPUs"""
return '{:.1f}'.format(n)
def format_usage(usage_map):
"""Given a "usage map" with cpus, mem, and gpus, returns a formatted usage string"""
cpus = usage_map['cpus']
gpus = usage_map['gpus']
s = f'Usage: {format_cpus(cpus)} CPU{"s" if cpus > 1 else ""}, {format_job_memory(usage_map)} Memory'
if gpus > 0:
s += f', {gpus} GPU{"s" if gpus > 1 else ""}'
return s
def print_formatted_cluster_or_pool_usage(cluster_or_pool, cluster_or_pool_usage):
"""Prints the query result for a cluster or pool in a cluster as a hierarchical set of bullets"""
usage_map = cluster_or_pool_usage['usage']
share_map = cluster_or_pool_usage['share']
quota_map = cluster_or_pool_usage['quota']
print_info(terminal.bold(cluster_or_pool))
format_limit = lambda limit, formatter=(lambda x: x): \
'Unlimited' if limit == sys.float_info.max else formatter(limit)
rows = [
['Quota',
format_limit(quota_map['cpus']),
format_limit(quota_map['mem'], format_memory_amount),
format_limit(quota_map['gpus']),
'Unlimited' if quota_map['count'] == (2 ** 31 - 1) else quota_map['count']],
['Share',
format_limit(share_map['cpus']),
format_limit(share_map['mem'], format_memory_amount),
format_limit(share_map['gpus']),
'N/A'],
['Current Usage',
usage_map['cpus'],
format_job_memory(usage_map),
usage_map['gpus'],
usage_map['jobs']]
]
print_info(tabulate(rows, headers=['', 'CPUs', 'Memory', 'GPUs', 'Jobs'], tablefmt='plain'))
applications = cluster_or_pool_usage['applications']
if applications:
print_info('Applications:')
for application, application_usage in applications.items():
usage_map = application_usage['usage']
print_info(f'- {terminal.running(application if application else "[no application defined]")}')
print_info(f' {format_usage(usage_map)}')
print_info(' Job Groups:')
for group, group_usage in application_usage['groups'].items():
usage_map = group_usage['usage']
jobs = group_usage['jobs']
print_info(f'\t- {terminal.bold(group if group else "[ungrouped]")}')
print_info(f'\t {format_usage(usage_map)}')
print_info(f'\t Jobs: {len(jobs)}')
print_info('')
print_info('')
def print_formatted(query_result):
"""Prints the query result as a hierarchical set of bullets"""
for cluster, cluster_usage in query_result['clusters'].items():
if 'using_pools' in cluster_usage:
if cluster_usage['using_pools']:
for pool, pool_usage in cluster_usage['pools'].items():
state = ' (inactive)' if pool_usage['state'] == 'inactive' else ''
print_formatted_cluster_or_pool_usage(f'{cluster} - {pool}{state}', pool_usage)
else:
print_formatted_cluster_or_pool_usage(cluster, cluster_usage)
def filter_query_result_by_pools(query_result, pools):
"""
    Filter the query result if pools are provided. Print an error
    message if some of the pools are not found in any cluster.
"""
clusters = []
known_pools = []
pools_set = set(pools)
filtered_clusters = {}
for cluster, cluster_usage in query_result['clusters'].items():
clusters.append(cluster)
if cluster_usage['using_pools']:
filtered_pools = {}
for pool, pool_usage in cluster_usage['pools'].items():
known_pools.append(pool)
if pool in pools_set:
filtered_pools[pool] = pool_usage
if filtered_pools:
filtered_clusters[cluster] = cluster_usage
cluster_usage['pools'] = filtered_pools
query_result['clusters'] = filtered_clusters
missing_pools = pools_set.difference(set(known_pools))
if missing_pools:
print_error((f"{list(missing_pools)[0]} is not a valid pool in "
if len(missing_pools) == 1 else
f"{' / '.join(missing_pools)} are not valid pools in ") +
(clusters[0]
if len(clusters) == 1 else
' / '.join(clusters)) +
'.')
if query_result['clusters'].items():
print_error('')
return query_result
def usage(clusters, args, _):
"""Prints cluster usage info for the given user"""
guard_no_cluster(clusters)
as_json = args.get('json')
user = args.get('user')
pools = args.get('pool')
query_result = query(clusters, user)
if pools:
query_result = filter_query_result_by_pools(query_result, pools)
if as_json:
print_as_json(query_result)
else:
print_formatted(query_result)
return 0
def register(add_parser, add_defaults):
"""Adds this sub-command's parser and returns the action function"""
parser = add_parser('usage', help='show breakdown of usage by application and group')
parser.add_argument('--user', '-u', help='show usage for a user')
parser.add_argument('--pool', '-p', action='append', help='filter by pool (can be repeated)')
parser.add_argument('--json', help='show the data in JSON format', dest='json', action='store_true')
add_defaults('usage', {'user': current_user()})
return usage
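# Rough command-line sketch (the `cs` executable name and the pool name are
# assumptions; the flags come from the parser registered above):
#
#     cs usage
#     cs usage --user alice --pool k8s-alpha
#     cs usage --json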
|
apache-2.0
| -8,236,608,904,435,405,000
| 41.416667
| 116
| 0.580959
| false
| 3.953398
| false
| false
| false
|
Karajlug/karajlug
|
news/models.py
|
1
|
2584
|
# coding: utf-8
# -----------------------------------------------------------------------------
# Karajlug.org
# Copyright (C) 2010-2012 Karajlug community
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------------
from django.db import models
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from django.conf import settings
from locales.managers import I18nManager
class News(models.Model):
"""
News module main model
"""
user = models.ForeignKey(User, editable=False,
verbose_name=_("User"))
title = models.CharField(max_length=60,
verbose_name=_("Title"))
content = models.TextField(verbose_name=_("News content"))
lang = models.CharField(_("Language"), max_length=8,
choices=settings.LANGUAGES,
default=settings.LANGUAGE_CODE)
date = models.DateTimeField(auto_now_add=True, auto_now=False,
verbose_name=_('Date and Time'))
objects = I18nManager()
def __unicode__(self):
return self.title
def get_absolute_url(self):
return "/%s/news/%s" % (self.lang, self.id)
#def irc_repr(self, logentry):
# if logentry.is_addition():
# return ["News: %s added by %s at %s" % (
# self.title,
# self.user,
# self.full_path())]
#
# phrase = ""
# if logentry.is_change():
# phrase = "changed"
# elif logentry.is_delete():
# phrase = "deleted"
#
# return ["%s %s a news: %s" % (
# self.user,
# phrase,
# self.full_path())]
class Meta:
verbose_name_plural = _("News")
verbose_name = _('News')
|
gpl-2.0
| -4,674,412,820,575,546,000
| 34.888889
| 79
| 0.560759
| false
| 4.201626
| false
| false
| false
|
Sabinu/IDA
|
IDA.py
|
1
|
12694
|
import sublime
import sublime_plugin
import os.path
# import filecmp
# import shutil
# import time
# import json
from subprocess import Popen, PIPE
from collections import namedtuple as nt
import xml.etree.ElementTree as ET
import webbrowser
import urllib
scripts = {"Script_1D": "0 Master Script",
"Script_2D": "1 Script 2D",
"Script_3D": "2 Script 3D",
"Script_VL": "3 Parameter Script",
"Script_UI": "4 Interface Script",
"Script_PR": "5 Properties Script",
"Script_FWM": "6 Forward Migration",
"Script_BWM": "7 Backward Migration"}
def clip_path(path, folder):
    ''' clips a path at the given folder
        returns the part of the path that follows `folder` (the folder itself
        is excluded; callers pass directory paths, so no file name is included)
'''
path = path.split(os.sep)
clip = path.index(folder)
output = os.sep.join(path[clip + 1:])
return output
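# Worked example (illustrative path only, assuming a POSIX os.sep):
# clip_path('proj/XML/Doors', 'XML') splits into ['proj', 'XML', 'Doors'],
# finds 'XML' at index 1 and returns 'Doors'.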
class IDACommand(sublime_plugin.WindowCommand):
def __init__(self, *args):
''' get all project info '''
super().__init__(*args)
self.project_info = self.window.extract_variables()
self.platform = self.project_info.get('platform', None)
self.project_path = self.project_info.get('project_path', None)
self.project_name = self.project_info.get('project_base_name', None)
self.current_object = self.project_info.get('file_path', None)
self.settings = sublime.load_settings('Default ({}).sublime-settings'.format(self.platform))
self.lp_xml_converter = self.settings.get('LP_XML_Converter')
self.objects = None # TODO try to ingest objects at __init__
self.initiate_folders()
def initiate_folders(self):
''' initiates paths of needed folders. '''
if self.project_name is None:
return
self.folder_backup = self.project_path + os.sep + self.project_name + '.backup'
self.folder_images = self.project_path + os.sep + 'images'
self.folder_code = self.project_path + os.sep + 'CODE'
self.folder_library = self.project_path + os.sep + self.project_name + '.library'
self.folder_xml = self.project_path + os.sep + self.project_name + '.xml'
def check_project(self, output=True):
''' Check if user is in a project.
returns: True or False.
'''
if self.project_name is None:
if output:
sublime.error_message('IDA Message:\nYou are not in a Project\nPlease work inside a project.')
return False
else:
return True
# ======================================================
def get_lp_xml(self):
''' TODO
returns the path/adress of the LP_XML Converter.
'''
if self.lp_xml_converter is None:
self.window.show_input_panel('Archicad Version:', '19', self.done_lp_xml, self.change_lp_xml, self.cancel_lp_xml)
return self.lp_xml_converter
def done_lp_xml(self, ac_version):
print('Got: {}'.format(ac_version))
def change_lp_xml(self, ac_version):
print('Changed: {}'.format(ac_version))
def cancel_lp_xml(self):
print('LP_XML_Converter was not given.')
# ======================================================
def list_lp_xml(self):
''' INSPECTION method
prints the path of the LP_XML Converter to the console.
'''
print('>>> LP_XML_Converter: {}'.format(self.get_lp_xml()))
def list_project_info(self):
''' INSPECTION Method
prints the project info to the console.
'''
print(60 * '=')
print('PROJECT INFO')
print(60 * '=')
for k, v in self.project_info.items():
print('{:<25}: {}'.format(k, v))
# ========= BLEND WITH LIST_OBJECTS ==================================
def get_tree(self, walk=None, folder=None):
''' TODO
must return a clean list of objects(name & path)
regardless of the source of the walk.
which could only be from: `backup`, `library`, `code` or `xml` folders
'''
folder = folder.split(os.sep)[-1]
tree = []
library_part = nt('library_part', 'path name')
for i in walk:
for f in i[2]:
if f[0] != '.':
tree.append(library_part(path=clip_path(i[0], folder), name=f))
# if self.tree is None: # TODO make tree available in JSON file, as reference
# self.tree = tree
return tree
def list_objects(self, folder=None, output=False):
''' TODO
must output all objects in specified folder
returns a list with objects(name & path)
'''
print(60 * '=')
print('GSM OBJECTS in {}'.format(folder.split(os.sep)[-1]))
print(60 * '=')
walk = list(os.walk(folder))
tree = self.get_tree(walk, folder)
for i in tree:
print('{:<30}: {}'.format(i[0], i[1]))
return tree
# ========= BLEND WITH LIST_OBJECTS ==================================
def make_all(self):
''' Makes all objects in the project:
transforms all source folders into .gsm files
'''
if os.path.isfile(self.lp_xml_converter):
output = None
p = Popen([self.lp_xml_converter,
'x2l',
'-img',
self.folder_images,
self.folder_xml,
self.folder_library], stdout=PIPE)
# TODO add password option
output = p.communicate()[0]
output = output.decode("utf-8")[:-1]
output = output.replace('\r', '')
print("Making all objects from library.")
print(output)
def import_all(self):
''' Gets all objects from the library folder
and puts them in the xml folder
'''
if os.path.isfile(self.lp_xml_converter):
output = None
p = Popen([self.lp_xml_converter,
'l2x',
'-img',
self.folder_images,
self.folder_library,
self.folder_xml], stdout=PIPE)
# TODO add password option
output = p.communicate()[0]
output = output.decode("utf-8")[:-1]
output = output.replace('\r', '')
print(60 * '+')
print("Importing all objects from library.")
print(output)
# TODO Check Import with tree
# TODO Assimilate objects
else:
# TODO
sublime.error_message('IDA Message:\nRectify LP_XML Location not implemented.')
class IdaNewObjectCommand(IDACommand):
def run(self):
sublime.error_message('IDA Message:\nFunction not implemented.')
class IdaCurrentMakeCommand(IDACommand):
def run(self):
sublime.error_message('IDA Message:\nFunction not implemented.')
class IdaCurrentImportCommand(IDACommand):
def run(self):
sublime.error_message('IDA Message:\nFunction not implemented.')
class IdaAllMakeCommand(IDACommand):
def run(self):
if not self.check_project():
return
print(60 * '+')
print('IDA Make All')
print(60 * '+')
# TODO ok for now, but source should be CODE_folder
# This means only scripts in XML will be 'Made'
objects = self.list_objects(self.folder_xml)
print(60 * '=')
for lp in objects:
# TODO <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< THIS IS WHERE YOU BEGIN
folder_name = self.folder_xml + '.test'
filename = '{}/{}{}'.format(folder_name, lp.path, lp.name)
xml_file = '{}/{}/{}'.format(self.folder_xml, lp.path, lp.name)
# TODO try to put this in method, make structure at given folder ===
lp_name = lp.name.split('.')[0] # this seems pointless
try:
os.makedirs('{}/{}/{}'.format(folder_name, lp.path, lp_name))
except:
pass
# ==================================================================
with open(xml_file, 'r', encoding='utf-8') as obj_file:
xml = obj_file.read()
lp_root = ET.fromstring(xml)
# You've got to look in the .CODE folder for the number of scripts to insert
s_num = 0
for script_name in scripts:
t = lp_root.find('.//' + script_name).text
t = t[2:-2]
if t != '':
s_num += 1
script_file = '{}/{}/{}.CODE/{}.gdl'.format(self.folder_code, lp.path, lp_name, scripts[script_name])
with open(script_file, 'w', encoding='utf-8') as scr_file:
scr_file.write(t)
print('Imported {} Scripts from: {}'.format(s_num, lp_name))
# self.import_all() # <<<<<<<<<<<<<<<< MAKE ON
print(60 * '+')
class IdaAllImportCommand(IDACommand):
''' Imports All Objects from `project.LIBRARY` >> `CODE` folder
'''
def run(self):
if not self.check_project():
return
self.import_all()
print(60 * '+')
print('IDA Import All')
print(60 * '+')
# TODO this is redundant, objects should already be checked and in self.objects.
objects = self.list_objects(self.folder_xml) # TODO Maybe this should be renamed tree_from_folder
print(60 * '=')
for lp in objects:
filename = '{}/{}/{}'.format(self.folder_xml, lp.path, lp.name)
# TODO try to put this in method, make structure at given folder ===
lp_name = lp.name.split('.')[0] # this seems pointless
try:
os.makedirs('{}/{}/{}.CODE'.format(self.folder_code, lp.path, lp_name))
except:
pass
# ==================================================================
with open(filename, 'r', encoding='utf-8') as obj_file:
xml = obj_file.read()
lp_root = ET.fromstring(xml)
# self.unpack_object(lp, lp_root)
s_num = 0
for script_name in scripts:
found_script = lp_root.find('.//' + script_name)
if found_script is not None:
t = found_script.text
t = t[2:-2]
else:
t = ''
if t != '':
s_num += 1
script_file = '{}/{}/{}.CODE/{}.gdl'.format(self.folder_code, lp.path, lp_name, scripts[script_name])
with open(script_file, 'w', encoding='utf-8') as scr_file:
scr_file.write(t)
print('Imported {} Scripts from: {}'.format(s_num, lp_name))
# for i in list(lp_root.iter()):
# print(i)
print(60 * '+')
class IdaLibraryMakeCommand(IDACommand):
def run(self):
sublime.error_message('IDA Message:\nFunction not implemented.')
class IdaLibraryUnpackCommand(IDACommand):
def run(self):
sublime.error_message('IDA Message:\nFunction not implemented.')
# class IdaGdlDocsCommand(IDACommand):
# def run(self):
# window = self.window
# view = window.active_view()
# sel = view.sel()
# region = sel[0]
# print(region.a, region.b)
# print('>>')
# word = view.word(region)
# print('<<')
# selectionText = view.substr(word)
# print('+' + selectionText + '+')
class IdaGdlDocsCommand(sublime_plugin.TextCommand):
def run(self, selected_text):
try:
selections = self.view.sel()
if selections:
needle = self.view.substr(selections[0])
if len(needle) == 0:
print("IDA: You did not select text. Try again.")
else:
url = "http://gdl.graphisoft.com/?s=" + needle
url = urllib.parse.urlparse(url).geturl()
user_message = "IDA: Performing search: '" + needle + "'"
print(user_message)
sublime.status_message(user_message)
webbrowser.open(url)
else:
print("IDA: You did not select text. Try again.")
sublime.status_message("IDA: Text was not selected")
except Exception as e:
print("IDA: There was an error during the execution of the plugin.\n")
sublime.status_message("IDA: Open console for info")
raise e
|
mit
| 7,978,942,153,967,970,000
| 37.583587
| 125
| 0.520561
| false
| 4.019633
| false
| false
| false
|
gt-ros-pkg/rcommander-pr2
|
rcommander_ar_tour/src/rcommander_ar_tour/detect_robot_move.py
|
1
|
1925
|
import sensor_msgs.msg as sm
import numpy as np
import copy
import rospy
class DetectRobotMove:
JOINT_TO_WATCH = ['fl_caster_rotation_joint', 'fl_caster_l_wheel_joint',
'fl_caster_r_wheel_joint', 'fr_caster_rotation_joint',
'fr_caster_l_wheel_joint', 'fr_caster_r_wheel_joint',
'bl_caster_rotation_joint', 'bl_caster_l_wheel_joint',
'bl_caster_r_wheel_joint', 'br_caster_rotation_joint',
'br_caster_l_wheel_joint', 'br_caster_r_wheel_joint',
'torso_lift_joint', 'torso_lift_motor_screw_joint',
'head_pan_joint', 'head_tilt_joint']
#CONSTANTLY_MOVING_JOINTS = ['laser_tilt_mount_joint', 'l_upper_arm_roll_joint', 'r_upper_arm_roll_joint', 'r_gripper_motor_slider_joint', 'l_gripper_motor_slider_joint']
TIME_TO_WAIT_AFTER_MOVING = 3.
def __init__(self):
self.last_msg = None
self.joint_dict = {}
self.is_moving_time = rospy.get_rostime().to_time()
self.watch_idx = []
sub = rospy.Subscriber("joint_states", sm.JointState, self.joint_state_cb)
def joint_state_cb(self, msg):
if self.last_msg == None:
self.last_msg = msg
for idx, n in enumerate(msg.name):
self.joint_dict[n] = idx
for n in DetectRobotMove.JOINT_TO_WATCH:
self.watch_idx.append(self.joint_dict[n])
vels = np.array(copy.deepcopy(msg.velocity))
nlist = np.array(msg.name)
moving = np.abs(vels) > 0.01
watch_joints = moving[self.watch_idx]
watch_joints_n = nlist[self.watch_idx]
if watch_joints.any():
self.is_moving_time = msg.header.stamp.to_time()
def is_moving(self):
return (rospy.get_rostime().to_time() - self.is_moving_time) < DetectRobotMove.TIME_TO_WAIT_AFTER_MOVING
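# Minimal usage sketch (assumes a running ROS master with a joint_states
# publisher; the node name below is illustrative only):
#
#   rospy.init_node('detect_robot_move_demo')
#   detector = DetectRobotMove()
#   rate = rospy.Rate(1)
#   while not rospy.is_shutdown():
#       rospy.loginfo('robot moving: %s' % detector.is_moving())
#       rate.sleep()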
|
bsd-3-clause
| -4,458,813,909,213,405,000
| 40.847826
| 174
| 0.58026
| false
| 3.176568
| false
| false
| false
|
airanmehr/Utils
|
Hyperoxia.py
|
1
|
10317
|
import numpy as np
import pandas as pd
pd.options.display.expand_frame_repr = False
import UTILS.Util as utl
import UTILS.Plots as pplt
import pylab as plt
import seaborn as sns
path=utl.home+'storage/Data/Dmelanogaster/OxidativeStress/'
CHROMS=['2L','2R','3L','3R','X','4']
pops={'C':'Control','H':'Hyperoxia','L':'Hypoxia'}
taus=['1-31', '7-61', '31-61', '61-114', '114-180']
def X(D=False,C=False):
a=rename(pd.read_pickle('/home/arya/fly/all/RC/all.df'))#.stack([0, 1, 2])
a=a[a.H.xs('D', 1, 2).min(1)>9]
a=a.loc[CHROMS].loc[:, pd.IndexSlice[:, [1, 4, 7, 12, 17, 31, 61, 114, 180]]].dropna()
if D: return a.xs('D', 1, 3)
if C: return a.xs('C', 1, 3)
a = (a.xs('C', 1, 3) / a.xs('D', 1, 3)).round(2)
return a.dropna()
def rename(c):
def one(x):
if 'final' in x:
gen = {1: 1, 2: 7, 3: 12, 4: 15, 5: 31, 6: 61, 7: 114}
x = x[1:].split('_')
return 'H', gen[int(x[0])], int(x[1].split('.')[0])
if 'Clean' in x:
x = x.split('_')[1:]
return x[0][0], 180, int(x[0][-1])
ash=utl.execute('cat /home/arya/fly/F4-17/SraRunTable.tsv | cut -f7,9').iloc[1:].set_index(0)[1]
return ash.apply(lambda x: x[1:]).apply(lambda x: (x[-2].replace('H','L'),int(x[:-2]),int(x[-1]) )).loc[x]
if len(c.columns)==1:
c.columns = pd.MultiIndex.from_tuples(map(one, c.columns), names=['POP', 'GEN', 'REP'])
else:
cols= [x+(y,) for x,y in zip(map(one, c.columns.get_level_values(0)), c.columns.get_level_values(1))]
c.columns=pd.MultiIndex.from_tuples(cols, names=['POP', 'GEN', 'REP','READ'])
return c.sort_index(1)
def fixcols(a):
gmap={7:1,12:7,31:12,61:31,114:61,180:114}
a.columns=pd.Series(a.columns).apply(lambda x: '{}-{}'.format( gmap[int(x.split('-')[0])],x.split('-')[-1])).tolist()
return a
def PCA(x):
X=utl.pcaX(x.dropna().T,2)
c=pd.DataFrame(map(lambda x: list(x)[:2],X.index)).drop_duplicates().set_index([0,1]).sort_index()
marker=pd.Series(pplt.getMarker(c.index.levels[1].size,False),index=c.index.levels[1])
for xx in marker.index: c.loc[pd.IndexSlice[:,xx],'m']=marker.loc[xx]
c.loc['L', 'c'] = 'darkblue'
c.loc['H', 'c'] = 'r'
c.loc['C', 'c'] = 'g'
fig=plt.figure(dpi=150);ax=plt.gca()
for i in c.index:
if i[1] =='': continue
X.sort_index().loc[i].plot.scatter(x=0,y=1,c=c.loc[i].c,label='{}.{}'.format(i[0],i[1]),ax=ax,s=70,alpha=0.6,marker=c.loc[i].m)
plt.xlabel('PC1');plt.ylabel('PC2');
plt.title('Genomewide PCA (H:Hyperoxia, C:Control, L:Hypoxia) of Flies');
plt.gcf().axes[0].legend(frameon=True, bbox_to_anchor=(1,1),ncol=1);
def getFixationCutoffs():
steps = pd.Series(0, taus).groupby(level=0).apply(
lambda x: int(x.name.split('-')[1]) - int(x.name.split('-')[0]) - 1)
import CLEAR.Libs.Markov as mkv
def getFreqCutoff(tau):
T = mkv.Markov.computePower(mkv.Markov.computeTransition(0, 100, 50), tau)
p = T[.95].cumsum() / T[.95].cumsum()[1]
return p[p > 0.01].index[0]
return steps.apply(getFreqCutoff).sort_values()
def getHueEpistatis(z,t):
t1=z.columns[-1]
i0=(z[t]<0.11)
import seaborn as sns
cm=sns.color_palette("colorblind", 6)
colors = ['k']+[cm[2]]+[cm[1]]
hue=pd.concat([i0,~i0&(z[t1]<0.5),~i0&(z[t1]>0.5)],1,keys=[0,1,2]).apply(lambda x: x.idxmax(),1).rename(0).reset_index().rename(columns={'POS':'index'})
hue['c']=hue.apply(lambda x: colors[x[0]],1)
return hue
def oversample(x,L=1e5,start=0):
np.random.seed(0)
z=pd.concat([x,x]).sample(frac=1)
z.index=sorted(np.random.choice(int(L), z.shape[0], replace=False)+start)
z=z[~z.index.duplicated()]
z.index=map(int,z.index)
return z
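# Hedged illustration of oversample(): the rows of x are duplicated,
# shuffled and re-indexed onto random unique integer positions inside a
# window of length L starting at `start`, e.g.
#   z = oversample(x, L=1e5, start=int(2.3e6))
# gives roughly twice the rows of x, indexed by sorted positions in
# [2.3e6, 2.3e6 + 1e5).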
def plotHaplotypes(x,hue=None,track=True,t=130,lite=True,traj=True , km=None,ss=-1,distinguishHapsInTracks=False,CHROM=None,recPos=None,ntracks=6,clean=True,fold=True,freqAt=200,maxGen=200):
freqAt = x.columns[pd.Series(x.columns - freqAt).abs().idxmin()]
try:
t=x.columns[x.columns>=t][0]
except:
t=x.columns[x.shape[1]//2]
print 'Warning: t not found in x.columns; falling back to the middle column'
xf=x.copy(True).fillna(0)
i=xf[t]>0.5
xf[i]=1-xf[i]
haps = utl.kmeans(xf[t], km)
h12 = haps.unique()
if clean and km==2:
cf = np.mean(haps.value_counts().index)
drop =utl.TI((haps == min(h12)) & (xf[t] > cf - 0.05)).tolist()+ utl.TI((haps == max(h12)) & (xf[t] < cf + 0.05)).tolist()
xf = xf.drop(drop)
haps = haps.drop(drop)
if hue is None and not (km is None):
if km >1:
t2=x.columns[x.columns>t][0]
splitted=0
if xf[haps==h12[0]][t2].mean()>xf[haps==h12[1]][t2].mean():
splitted=1
sp=haps == h12[splitted]
ff = lambda (x, c, k): pd.DataFrame(([(y, c, k) for y in utl.TI(x)]), columns=['index', 'c', 0])
cm = sns.color_palette("colorblind", 6)
hue=pd.concat(map(ff,[(~sp,'k',0),((sp) &( xf[t2]>0.5),cm[1],1),((sp) &( xf[t2]<0.5),cm[2],2)]))
# else:
# hue=getHueEpistatis(xf,t)
REALDATA=CHROM is not None
if not REALDATA:ax=plt.subplots(1,2,figsize=(8,3),dpi=120)[1]
if traj:
if REALDATA:
pplt.Trajectory.Fly(xf, hue=hue, subsample=ss,)
plt.gca().set_title('')
else:
xx=xf
if not fold:
xx=x.loc[:,x.columns<=freqAt]
xx.loc[:,maxGen+1]=np.nan
pplt.Trajectory.Fly(xx, logscale=False, hue=hue, subsample=ss, ax=ax[0])
if distinguishHapsInTracks:
jj=(xf[t]>0.1) & (xf.iloc[:,-1]<0.1)
xf[jj]-=0.01
if lite:
j=(x[[t,x.columns[-1]]].sum(1)-1).abs()<0.9
else:
j=haps.fillna(0)>-np.inf
xf.index.name='POS'
if track:
if REALDATA:
if hue is not None:
pplt.plotTracks(haps[j], ntracks=ntracks,dmel=5,CHROM=CHROM, markersize=8, ymin=-0.07, ymax=1.03, hue=hue, alpha=0.3,genesToColor=[]);
else:
pplt.plotTracks(haps[j], ntracks=ntracks, dmel=5, CHROM=CHROM, markersize=8, ymin=-0.07, ymax=1.03, alpha=0.3, genesToColor=[]);
plt.gcf().axes[0].set_ylabel('Frequency\nat Gen. 114')
plt.tight_layout(pad=0)
else:
if hue is not None:
if fold:
pplt.plotTracks(haps[j], ntracks=-1, markersize=8, ymin=-0.07, ymax=1.03, hue=hue, alpha=0.3, ax=ax[1]);
else:
pplt.plotTracks(x[freqAt], ntracks=-1, markersize=8, ymin=-0.07, ymax=1.03, hue=hue, alpha=0.3,ax=ax[1]);
else:
pplt.plotTracks(haps[j], ntracks=-1, markersize=8, ymin=-0.07, ymax=1.03,colors='k', alpha=0.3, ax=ax[1]);
ax[1].set_ylabel(''); ax[1].set_yticks([])
ax[0].set_title('')
ax[1].set_xlabel('Position')
if fold:
ax2 = ax[1].twinx()
ax2.set_ylabel('Frequency at Gen. 150')
map(lambda x: pplt.setSize(x,12),list(ax)+[ax2])
else:
map(lambda x: pplt.setSize(x, 12), list(ax) )
ax[1].set_xlim([xf.index.min()-2000,xf.index.max()+2000])
plt.tight_layout(pad=0.1)
if recPos:
plt.gcf().axes[0].axvline(recPos, c='r', alpha=0.4)
plt.gcf().axes[1].axvline(recPos, c='r', alpha=0.4)
def plotSimupopRec(df,bapos,title='',k=500):
np.random.seed(0)
def plot(DF, ba, before=None, ax=None):
show = False
if ax is None:
ax = plt.subplots(1, 1, figsize=(6, 4), dpi=100)
show = True
if before is None:
df = DF
else:
if before > 0:
df = DF.loc[:, DF.columns < before]
else:
df = DF.loc[:, DF.columns > -before]
if k > 0:
df2 = df.T.sample(k).T
else:
df2 = df
df2.plot(legend=False, c='k', alpha=0.1, ax=ax);
DF[[ba]].plot(legend=False, c='r', alpha=0.92, ax=ax);
if show:
plt.show()
ax = plt.subplots(1, 2, figsize=(8, 4), dpi=100)[1];
plot(df, bapos, 5e4, ax=ax[0]);
plot(df, bapos, -5e4, ax=ax[1])
ax[0].set_title('Left')
ax[1].set_title('Right')
ax[0].set_ylabel('Freq')
plt.suptitle(title)
def one(H,bapos,r=1e-8,plot=False,recAtPos=None):
import UTILS.Simulation as sim
recombinator=None
if recAtPos is not None:recombinator = sim.Simulation.Recombinator(r,recAtPos)
a=sim.Simulation(H0=H,numReplicates=1,s=0.25,recombinator=recombinator,posUnderSelection=bapos,maxGeneration=60,generationStep=5,N=200,ExperimentName='V',save=False,seed=0)
df=a.df.T;
if plot:
df=df.loc[:,sorted([bapos] + utl.TI(utl.MAF(df.iloc[0])>0.05).tolist())]
plotSimupopRec(df, bapos,'r={}'.format(r))
return (df.loc[:,df.columns>5e4].iloc[-1]>0.5).sum()
def evolve_recombine_only_at(H,recAti,bapos,r=1e-2, atGen=None,maxGen=50,plot=False,seed=None):
import UTILS.Simulation as sim
bai=np.where(H.columns==bapos)[0][0]
if recAti is None:
recombinator = sim.sim.Recombinator(intensity=r)
else:
recombinator = sim.Simulation.Recombinator(r, recAti)
pop=sim.POP.createISOGenicDiploidPopulation(H);X=[sim.POP.freq(pop).rename(0)];t=1
Args=lambda pop:{'pop': pop, 'gen': 1, 's': .6, 'h': 0.5, 'siteUnderSelection': bai,'seed':seed, 'r': 0}
if atGen is None:
for _ in range(maxGen):
pop = sim.Simulation._simualtePop(recombinator=recombinator, **Args(pop));
X += [sim.POP.freq(pop).rename(t)];
t += 1
else:
for _ in range(atGen):
pop=sim.Simulation._simualtePop(**Args(pop));
X += [sim.POP.freq(pop).rename(t)];t+=1
pop=sim.Simulation._simualtePop(recombinator=recombinator,**Args(pop));X += [sim.POP.freq(pop).rename(t)];t+=1
for _ in range(atGen+1,maxGen):
pop=sim.Simulation._simualtePop(**Args(pop));X += [sim.POP.freq(pop).rename(t)];t+=1
df=pd.concat(X,1).T;df.index.name='TIME';
if plot:
plotSimupopRec(df, bapos,'r={} atGen={} seed={}'.format(r,atGen,seed))
for ax in plt.gcf().axes:
ax.axvline(atGen,c='b')
# plt.show()
return df
|
mit
| -7,256,639,315,684,374,000
| 38.231939
| 190
| 0.557817
| false
| 2.601362
| false
| false
| false
|
bil9000/dust
|
volume_snapshot.py
|
1
|
1605
|
import boto3
#Describe your regions here
region_list = ['us-west-2']
#For creating snapshots
def create_snapshot(volume):
Description='Created for volume '+volume
client = boto3.client('ec2')
response = client.create_snapshot(
DryRun=False,
VolumeId=volume,
Description=Description
)
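# Note (hedged): create_snapshot() builds its EC2 client without an explicit
# region, so snapshots land in the default region even though take_snapshots()
# iterates region_list. A region-aware variant would look like this sketch:
#
# def create_snapshot_in_region(volume, region):
#     client = boto3.client('ec2', region_name=region)
#     return client.create_snapshot(
#         DryRun=False,
#         VolumeId=volume,
#         Description='Created for volume ' + volume
#     )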
def take_snapshots():
client = boto3.client('ec2')
#Iterate over regions listed
for region in region_list:
print("\n"+"#"*60+" "+region+" "+"#"*60+"\n")
client = boto3.client('ec2', region_name=region)
#Check for ElasticSearch Instances
response = client.describe_instances(
Filters=[
{
'Name': 'tag:Name',
'Values': ['ElasticSearch']
}
])
#Iterate over instance(s)
for r in response['Reservations']:
for inst in r['Instances']:
inst_id=inst['InstanceId']
tags=inst['Tags']
volumes=inst['BlockDeviceMappings']
volume_name=[]
#Iterate over instance's volume(s)
for volume in volumes:
volume_name.append(volume)
print("Instance's volumes: ",volume_name)
for volume in volumes:
volume=volume['Ebs']['VolumeId']
print("Creating snapshot for volume: ",volume)
t = create_snapshot(volume)
if __name__ == "__main__":
try:
take_snapshots()
except Exception as err:
print(err)
|
apache-2.0
| -278,141,583,924,431,840
| 28.740741
| 66
| 0.523988
| false
| 4.445983
| false
| false
| false
|
DailyActie/Surrogate-Model
|
01-codes/pyOpt-1.2.0/pyOpt/pyMMA/pyMMA.py
|
1
|
19464
|
#!/usr/bin/env python
'''
pyMMA - A Python pyOpt interface to MMA.
Copyright (c) 2008-2014 by pyOpt Developers
All rights reserved.
Revision: 1.4 $Date: 21/06/2010 21:00$
Tested on:
---------
Linux with g77
Linux with gfortran
Linux with pathf95
Win32 with g77
Mac with g95
Developers:
-----------
- Mr. Andrew Lambe (AL)
- Dr. Ruben E. Perez (RP)
- Mr. Peter Jansen (PJ)
History
-------
v. 1.0 - Initial Class Creation (AL, 2005)
v. 1.1 - Extensive Functionality Updates (RP, 2008)
v. 1.2 - Migrate to pyOpt Framework (RP, 2008)
v. 1.3 - History support (PJ,RP, 2010)
v. 1.4 - Gradient Class Support (PJ,RP, 2010)
'''
__version__ = '$Revision: $'
'''
To Do:
- add unconstrained problems support
'''
# =============================================================================
# MMA Library
# =============================================================================
try:
import mma
except:
raise ImportError('MMA shared library failed to import')
# end
# =============================================================================
# Standard Python modules
# =============================================================================
import copy
import os
import time
# =============================================================================
# External Python modules
# =============================================================================
import numpy
# =============================================================================
# Extension modules
# =============================================================================
from pyOpt import Optimizer
from pyOpt import Gradient
# =============================================================================
# Misc Definitions
# =============================================================================
inf = 10.E+20 # define a value for infinity
# =============================================================================
eps = 1.0 # define a value for machine precision
while ((eps / 2.0 + 1.0) > 1.0):
eps = eps / 2.0
# end
eps = 2.0 * eps
# eps = math.ldexp(1,-52)
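# The loop above computes machine epsilon by halving until 1.0 + eps/2.0 is
# no longer distinguishable from 1.0; for IEEE-754 doubles this yields
# roughly 2.22e-16. A hedged cross-check (not used by the optimizer itself):
#   numpy.finfo(float).eps  # ~2.220446049250313e-16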
# =============================================================================
# MMA Optimizer Class
# =============================================================================
class MMA(Optimizer):
'''
MMA Optimizer Class - Inherited from Optimizer Abstract Class
'''
def __init__(self, pll_type=None, *args, **kwargs):
'''
MMA Optimizer Class Initialization
**Keyword arguments:**
- pll_type -> STR: Parallel Implementation (None, 'POA'-Parallel Objective Analysis), *Default* = None
Documentation last updated: Feb. 16, 2010 - Peter W. Jansen
'''
#
if (pll_type == None):
self.poa = False
elif (pll_type.upper() == 'POA'):
self.poa = True
else:
raise ValueError("pll_type must be either None or 'POA'")
# end
#
name = 'MMA'
category = 'Local Optimizer'
def_opts = {
# MMA Options
'MAXIT': [int, 1000], # Maximum Iterations
'GEPS': [float, 1e-6], # Dual Objective Gradient Tolerance
'DABOBJ': [float, 1e-6], #
'DELOBJ': [float, 1e-6], #
'ITRM': [int, 2], #
'IPRINT': [int, 1], # Output Level (<0 - None, 0 - Screen, 1 - File)
'IOUT': [int, 6], # Output Unit Number
'IFILE': [str, 'MMA.out'], # Output File Name
}
informs = {
0: 'The optimality conditions are satisfied.',
1: 'The algorithm has been stopped after MAXIT iterations.',
}
Optimizer.__init__(self, name, category, def_opts, informs, *args, **kwargs)
def __solve__(self, opt_problem={}, sens_type='FD', store_sol=True, disp_opts=False, store_hst=False,
hot_start=False, sens_mode='', sens_step={}, *args, **kwargs):
'''
Run Optimizer (Optimize Routine)
**Keyword arguments:**
- opt_problem -> INST: Optimization instance
- sens_type -> STR/FUNC: Gradient type, *Default* = 'FD'
- store_sol -> BOOL: Store solution in Optimization class flag, *Default* = True
- disp_opts -> BOOL: Flag to display options in solution text, *Default* = False
- store_hst -> BOOL/STR: Flag/filename to store optimization history, *Default* = False
- hot_start -> BOOL/STR: Flag/filename to read optimization history, *Default* = False
- sens_mode -> STR: Flag for parallel gradient calculation, *Default* = ''
- sens_step -> FLOAT: Sensitivity step size, *Default* = {} [corresponds to 1e-6 (FD), 1e-20(CS)]
Additional arguments and keyword arguments are passed to the objective function call.
Documentation last updated: February. 2, 2011 - Peter W. Jansen
'''
#
if ((self.poa) and (sens_mode.lower() == 'pgc')):
raise NotImplementedError(
"pyMMA - Current implementation only allows single level parallelization, either 'POA' or 'pgc'")
# end
if self.poa or (sens_mode.lower() == 'pgc'):
try:
import mpi4py
from mpi4py import MPI
except ImportError:
print 'pyMMA: Parallel objective Function Analysis requires mpi4py'
# end
comm = MPI.COMM_WORLD
nproc = comm.Get_size()
if (mpi4py.__version__[0] == '0'):
Bcast = comm.Bcast
elif (mpi4py.__version__[0] == '1'):
Bcast = comm.bcast
# end
self.pll = True
self.myrank = comm.Get_rank()
else:
self.pll = False
self.myrank = 0
# end
myrank = self.myrank
#
def_fname = self.options['IFILE'][1].split('.')[0]
hos_file, log_file, tmp_file = self._setHistory(opt_problem.name, store_hst, hot_start, def_fname)
#
gradient = Gradient(opt_problem, sens_type, sens_mode, sens_step, *args, **kwargs)
# ======================================================================
# MMA - Objective/Constraint Values and Gradients Function
# ======================================================================
def func(m, n, xval, f0val, df0dx, fval, dfdx):
# Variables Groups Handling
if opt_problem.use_groups:
xg = {}
for group in group_ids.keys():
if (group_ids[group][1] - group_ids[group][0] == 1):
xg[group] = xval[group_ids[group][0]]
else:
xg[group] = xval[group_ids[group][0]:group_ids[group][1]]
# end
# end
xn = xg
else:
xn = xval
# end
# Flush Output Files
self.flushFiles()
# Evaluate User Function (Real Valued)
fail = 0
f = []
g = []
if (myrank == 0):
if self.h_start:
[vals, hist_end] = hos_file.read(ident=['obj', 'con', 'fail'])
if hist_end:
self.h_start = False
hos_file.close()
else:
[f, g, fail] = [vals['obj'][0][0], vals['con'][0], int(vals['fail'][0][0])]
# end
# end
# end
if self.pll:
self.h_start = Bcast(self.h_start, root=0)
# end
if self.h_start and self.pll:
[f, g, fail] = Bcast([f, g, fail], root=0)
elif not self.h_start:
[f, g, fail] = opt_problem.obj_fun(xn, *args, **kwargs)
# end
# Store History
if (myrank == 0):
if self.sto_hst:
log_file.write(xval, 'x')
log_file.write(f, 'obj')
log_file.write(g, 'con')
log_file.write(fail, 'fail')
# end
# end
# Gradients
if self.h_start:
df = []
dg = []
if (myrank == 0):
[vals, hist_end] = hos_file.read(ident=['grad_obj', 'grad_con'])
if hist_end:
self.h_start = False
hos_file.close()
else:
df = vals['grad_obj'][0].reshape(
(len(opt_problem._objectives.keys()), len(opt_problem._variables.keys())))
dg = vals['grad_con'][0].reshape(
(len(opt_problem._constraints.keys()), len(opt_problem._variables.keys())))
# end
# end
if self.pll:
self.h_start = Bcast(self.h_start, root=0)
# end
if self.h_start and self.pll:
[df, dg] = Bcast([df, dg], root=0)
# end
# end
if not self.h_start:
#
df, dg = gradient.getGrad(xval, group_ids, [f], g, *args, **kwargs)
# end
# Store History
if self.sto_hst and (myrank == 0):
log_file.write(df, 'grad_obj')
log_file.write(dg, 'grad_con')
# end
# Objective Assigment
if isinstance(f, complex):
f0val = f.astype(float)
else:
f0val = f
# end
# Constraints Assigment
for i in xrange(len(opt_problem._constraints.keys())):
if isinstance(g[i], complex):
fval[i] = g[i].astype(float)
else:
fval[i] = g[i]
# end
# end
# Gradients Assigment
k = 0
for i in xrange(len(opt_problem._variables.keys())):
if isinstance(df[0, i], complex):
df0dx[i] = df[0, i].astype(float)
else:
df0dx[i] = df[0, i]
# end
for jj in xrange(len(opt_problem._constraints.keys())):
if isinstance(dg[jj, i], complex):
dfdx[k] = dg[jj, i].astype(float)
else:
dfdx[k] = dg[jj, i]
# end
k += 1
# end
# end
return f0val, df0dx, fval, dfdx
# Variables Handling
n = len(opt_problem._variables.keys())
xmin = []
xmax = []
xval = []
for key in opt_problem._variables.keys():
if (opt_problem._variables[key].type == 'c'):
xmin.append(opt_problem._variables[key].lower)
xmax.append(opt_problem._variables[key].upper)
xval.append(opt_problem._variables[key].value)
elif (opt_problem._variables[key].type == 'i'):
raise IOError('MMA cannot handle integer design variables')
elif (opt_problem._variables[key].type == 'd'):
raise IOError('MMA cannot handle discrete design variables')
# end
# end
xmin = numpy.array(xmin)
xmax = numpy.array(xmax)
xval = numpy.array(xval)
# Variables Groups Handling
group_ids = {}
if opt_problem.use_groups:
k = 0
for key in opt_problem._vargroups.keys():
group_len = len(opt_problem._vargroups[key]['ids'])
group_ids[opt_problem._vargroups[key]['name']] = [k, k + group_len]
k += group_len
# end
# end
# Constraints Handling
m = len(opt_problem._constraints.keys())
neqc = 0
# fval = []
fmax = []
if m > 0:
for key in opt_problem._constraints.keys():
if opt_problem._constraints[key].type == 'e':
raise IOError('MMA cannot handle equality constraints')
# neqc += 1
# end
# fval.append(opt_problem._constraints[key].value)
fmax.append(opt_problem._constraints[key].upper)
# end
else:
raise IOError('MMA support for unconstrained problems not implemented yet')
# end
# fval = numpy.array(fval)
fmax = numpy.array(fmax)
# Objective Handling
objfunc = opt_problem.obj_fun
nobj = len(opt_problem._objectives.keys())
f0val = []
for key in opt_problem._objectives.keys():
f0val.append(opt_problem._objectives[key].value)
# end
f0val = numpy.array(f0val)
# Setup argument list values
xmma = numpy.zeros([n], numpy.float)
# Space used internally by the program
# for the asymptotes (xlow and xupp) and
# computed bounds on x (alpha and beta)
xlow = numpy.zeros([n], numpy.float)
xupp = numpy.zeros([n], numpy.float)
alfa = numpy.zeros([n], numpy.float)
beta = numpy.zeros([n], numpy.float)
# The objective and constraint function
# values and space for the gradients
fval = numpy.zeros([m], numpy.float)
df0dx = numpy.zeros([n], numpy.float)
dfdx = numpy.zeros([m * n], numpy.float)
# Space for the coefficients and artificial
# variables to be computed (set to default values)
p = numpy.zeros([m * n], numpy.float)
q = numpy.zeros([m * n], numpy.float)
p0 = numpy.zeros([n], numpy.float)
q0 = numpy.zeros([n], numpy.float)
b = numpy.zeros([m], numpy.float)
y = numpy.zeros([m], numpy.float)
z = numpy.array([0.], numpy.float)
a = numpy.zeros([m], numpy.float)
c = 10000 * numpy.ones([m], numpy.float)
# Space for the Lagrange multipliers (ulam)
# the gradient of the dual objective function,
# search direction, and Hessian of the dual objective
ulam = numpy.ones([m], numpy.float)
gradf = numpy.zeros([m], numpy.float)
dsrch = numpy.zeros([m], numpy.float)
hessf = numpy.zeros([m * (m + 1) / 2], numpy.float)
# Specify that all variables are free to move
iyfree = numpy.ones([m], numpy.int)
#
iter = numpy.array([0], numpy.int)
maxit = numpy.array([self.options['MAXIT'][1]], numpy.int)
geps = numpy.array([self.options['GEPS'][1]], numpy.float)
dabobj = numpy.array([self.options['DABOBJ'][1]], numpy.float)
delobj = numpy.array([self.options['DELOBJ'][1]], numpy.float)
itrm = numpy.array([self.options['ITRM'][1]], numpy.int)
inform = numpy.array([0], numpy.int)
if (myrank == 0):
iprint = numpy.array([self.options['IPRINT'][1]], numpy.int)
else:
iprint = numpy.array([0], numpy.int)
iout = numpy.array([self.options['IOUT'][1]], numpy.int)
ifile = self.options['IFILE'][1]
if (myrank == 0):
if (iprint >= 0):
if os.path.isfile(ifile):
os.remove(ifile)
# end
# end
# end
#
nfunc = numpy.array([0], numpy.int)
# Run MMA
t0 = time.time()
mma.mma(n, m, iter, maxit, geps, dabobj, delobj, itrm, inform,
xval, xmma, xmin, xmax, xlow, xupp, alfa, beta, f0val, fval,
fmax, df0dx, dfdx, p, q, p0, q0, b, y, z, a, c, ulam, gradf,
dsrch, hessf, iyfree, iprint, iout, ifile, nfunc, func)
sol_time = time.time() - t0
if (myrank == 0):
if self.sto_hst:
log_file.close()
if tmp_file:
hos_file.close()
name = hos_file.filename
os.remove(name + '.cue')
os.remove(name + '.bin')
os.rename(name + '_tmp.cue', name + '.cue')
os.rename(name + '_tmp.bin', name + '.bin')
# end
# end
# end
if (iprint > 0):
mma.closeunit(self.options['IOUT'][1])
# end
# Store Results
sol_inform = {}
sol_inform['value'] = inform[0]
sol_inform['text'] = self.getInform(inform[0])
if store_sol:
sol_name = 'MMA Solution to ' + opt_problem.name
sol_options = copy.copy(self.options)
if sol_options.has_key('defaults'):
del sol_options['defaults']
# end
sol_evals = nfunc[0]
sol_vars = copy.deepcopy(opt_problem._variables)
i = 0
for key in sol_vars.keys():
sol_vars[key].value = xmma[i]
i += 1
# end
sol_objs = copy.deepcopy(opt_problem._objectives)
i = 0
for key in sol_objs.keys():
sol_objs[key].value = f0val[i]
i += 1
# end
if m > 0:
sol_cons = copy.deepcopy(opt_problem._constraints)
i = 0
for key in sol_cons.keys():
sol_cons[key].value = fval[i]
i += 1
# end
else:
sol_cons = {}
# end
sol_lambda = {}
opt_problem.addSol(self.__class__.__name__, sol_name, objfunc, sol_time,
sol_evals, sol_inform, sol_vars, sol_objs, sol_cons, sol_options,
display_opts=disp_opts, Lambda=sol_lambda, Sensitivities=sens_type,
myrank=myrank, arguments=args, **kwargs)
# end
return f0val, xmma, sol_inform
def _on_setOption(self, name, value):
'''
Set Optimizer Option Value (Optimizer Specific Routine)
Documentation last updated: May. 07, 2008 - Ruben E. Perez
'''
pass
def _on_getOption(self, name):
'''
Get Optimizer Option Value (Optimizer Specific Routine)
Documentation last updated: May. 07, 2008 - Ruben E. Perez
'''
pass
def _on_getInform(self, infocode):
'''
Get Optimizer Result Information (Optimizer Specific Routine)
Keyword arguments:
-----------------
id -> STRING: Option Name
Documentation last updated: May. 07, 2008 - Ruben E. Perez
'''
return self.informs[infocode]
def _on_flushFiles(self):
'''
Flush the Output Files (Optimizer Specific Routine)
Documentation last updated: August. 09, 2009 - Ruben E. Perez
'''
#
iPrint = self.options['IPRINT'][1]
if (iPrint >= 0):
mma.pyflush(self.options['IOUT'][1])
# end
# ==============================================================================
# MMA Optimizer Test
# ==============================================================================
if __name__ == '__main__':
# Test MMA
print 'Testing ...'
mma = MMA()
print mma
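# Hedged usage sketch of driving MMA on a pyOpt problem (the objective
# function, variable bounds and constraint below are illustrative only):
#
# from pyOpt import Optimization
# opt_prob = Optimization('Toy constrained problem', obj_fun)
# opt_prob.addVar('x1', 'c', lower=0.0, upper=10.0, value=1.0)
# opt_prob.addObj('f')
# opt_prob.addCon('g1', 'i')
# opt = MMA()
# opt.setOption('MAXIT', 500)
# fopt, xopt, inform = opt(opt_prob, sens_type='FD')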
|
mit
| 4,926,176,408,482,124,000
| 32.500861
| 113
| 0.457357
| false
| 4.011542
| false
| false
| false
|
sonofatailor/django-oscar
|
src/oscar/apps/dashboard/vouchers/forms.py
|
1
|
3559
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from oscar.core.loading import get_model
from oscar.forms import widgets
Voucher = get_model('voucher', 'Voucher')
Benefit = get_model('offer', 'Benefit')
Range = get_model('offer', 'Range')
class VoucherForm(forms.Form):
"""
A specialised form for creating a voucher and offer
model.
"""
name = forms.CharField(label=_("Name"))
code = forms.CharField(label=_("Code"))
start_datetime = forms.DateTimeField(
widget=widgets.DateTimePickerInput(),
label=_("Start datetime"))
end_datetime = forms.DateTimeField(
label=_("End datetime"), widget=widgets.DateTimePickerInput())
usage = forms.ChoiceField(choices=Voucher.USAGE_CHOICES, label=_("Usage"))
benefit_range = forms.ModelChoiceField(
label=_('Which products get a discount?'),
queryset=Range.objects.all(),
)
type_choices = (
(Benefit.PERCENTAGE, _('Percentage off of products in range')),
(Benefit.FIXED, _('Fixed amount off of products in range')),
(Benefit.SHIPPING_PERCENTAGE,
_("Discount is a percentage off of the shipping cost")),
(Benefit.SHIPPING_ABSOLUTE,
_("Discount is a fixed amount of the shipping cost")),
(Benefit.SHIPPING_FIXED_PRICE, _("Get shipping for a fixed price")),
)
benefit_type = forms.ChoiceField(
choices=type_choices,
label=_('Discount type'),
)
benefit_value = forms.DecimalField(
label=_('Discount value'))
exclusive = forms.BooleanField(
required=False,
label=_("Exclusive offers cannot be combined on the same items"))
def __init__(self, voucher=None, *args, **kwargs):
self.voucher = voucher
super(VoucherForm, self).__init__(*args, **kwargs)
def clean_name(self):
name = self.cleaned_data['name']
try:
voucher = Voucher.objects.get(name=name)
except Voucher.DoesNotExist:
pass
else:
if (not self.voucher) or (voucher.id != self.voucher.id):
raise forms.ValidationError(_("The name '%s' is already in"
" use") % name)
return name
def clean_code(self):
code = self.cleaned_data['code'].strip().upper()
if not code:
raise forms.ValidationError(_("Please enter a voucher code"))
try:
voucher = Voucher.objects.get(code=code)
except Voucher.DoesNotExist:
pass
else:
if (not self.voucher) or (voucher.id != self.voucher.id):
raise forms.ValidationError(_("The code '%s' is already in"
" use") % code)
return code
def clean(self):
cleaned_data = super(VoucherForm, self).clean()
start_datetime = cleaned_data.get('start_datetime')
end_datetime = cleaned_data.get('end_datetime')
if start_datetime and end_datetime and end_datetime < start_datetime:
raise forms.ValidationError(_("The start date must be before the"
" end date"))
return cleaned_data
class VoucherSearchForm(forms.Form):
name = forms.CharField(required=False, label=_("Name"))
code = forms.CharField(required=False, label=_("Code"))
is_active = forms.BooleanField(required=False, label=_("Is Active?"))
def clean_code(self):
return self.cleaned_data['code'].upper()
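# Hedged usage sketch (field values are illustrative; `some_range` is a
# hypothetical offer Range instance):
#
#   form = VoucherForm(data={
#       'name': 'Summer sale',
#       'code': 'summer10',
#       'start_datetime': '2017-06-01 00:00',
#       'end_datetime': '2017-06-30 23:59',
#       'usage': Voucher.MULTI_USE,  # one of Voucher.USAGE_CHOICES
#       'benefit_range': some_range.pk,
#       'benefit_type': Benefit.PERCENTAGE,
#       'benefit_value': '10.00',
#   })
#   form.is_valid()  # runs the name/code uniqueness and date-order checks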
|
bsd-3-clause
| 1,842,963,705,503,461,600
| 35.690722
| 78
| 0.602135
| false
| 4.231867
| false
| false
| false
|
mabrosim/mGerritStats
|
gerrit_stats.py
|
1
|
6247
|
#!/usr/bin/python3
"""
Copyright (c) 2014, Maxim Abrosimov
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to
endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
"""
import json
from os import path
from optparse import OptionParser
from models.utils import print_patch_stats
import params
from models.owner import Owner
from models.reviewer import Reviewer
from models.review import Review
from models.team import Team
def read_from_file(filename):
with open(filename, "r", encoding='utf-8') as f:
review_list = f.readlines()
return review_list
def get_reviews_from_file(filename):
def heuristic_filter(r):
def review_filter(key, filters, custom_type=0):
if custom_type == 0:
if key in filters:
return False
elif custom_type == 1:
for t in params.TOPIC_FILTER:
if key.find(t) >= 0:
return False
elif custom_type == 2:
if key.find('Merge remote-tracking branch') != -1:
return False
return True
return \
review_filter(r.subject, None, custom_type=2) and \
review_filter(r.branch, params.BRANCH_FILTER) and \
review_filter(r.project, params.PROJECT_FILTER) and \
review_filter(r.topic, params.TOPIC_FILTER, custom_type=1) and \
review_filter(r.owner['email'], params.EMAIL_FILTER)
return {review.id: review for review in
filter(heuristic_filter, map(lambda r: Review(json.loads(r)),
read_from_file(filename)))}
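# Hedged sketch of the expected input: each line of the file is one JSON
# object as produced by `gerrit query --format=JSON`, roughly of the form
#   {"id": "I123...", "subject": "...", "branch": "master",
#    "project": "some/project", "topic": "...", "owner": {"email": "a@b.c"}, ...}
# Lines failing any of the filters above are dropped; the survivors are
# keyed by review id.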
def reviewer_stats(email, reviews):
def reviewer_filter(patch_list):
for p in patch_list:
for a in p.approvals:
if 'email' in a['by'].keys():
if a['by']['email'] == email:
return True
return False
reviewer = Reviewer(email)
reviewer.fetch_data(list(filter(lambda r: reviewer_filter(r.patch_list), reviews.values())))
reviewer.gerrit_query(params.QUERY_COMMON_PREFIX, params.DAYS)
return reviewer
def owner_stats(email, reviews):
owner = Owner(email)
owner.fetch_data(list(filter(lambda r: r.owner['email'] == email, reviews.values())))
owner.gerrit_query(params.QUERY_COMMON_PREFIX, params.DAYS)
return owner
def team_stats(_name, team_emails, reviews):
def team_filter(r):
if r.owner['email'] in team_emails:
r.team_name = _name
return True
return False
team = Team(_name, team_emails)
team.fetch_data(list(filter(lambda r: team_filter(r), reviews.values())))
return team
def all_stats(reviews):
team = Team('all', list())
team.fetch_data(reviews.values())
team.print()
def parser_options():
parser = OptionParser()
parser.add_option("-R", "--Reviewers", dest="reviewers", action="append",
help="reviewers list", metavar="REVIEWERS")
parser.add_option("-O", "--Owners", dest="owners", action="append",
help="owners list", metavar="OWNERS")
parser.add_option("-T", "--Teams", dest="teams", action="append",
help="teams list", metavar="TEAMS")
parser.add_option("-d", "--days", dest="days",
help="fetch n days of data", metavar="DAYS")
parser.add_option("-f", "--file", dest="file",
help="data file path", metavar="FILE")
options, args = parser.parse_args()
if options.days:
params.DAYS = options.days
if options.owners:
params.OWNERS = options.owners
if options.reviewers:
params.REVIEWERS = options.reviewers
if options.teams:
params.TEAMS = options.teams
return options
def main():
options = parser_options()
if options.file:
file = options.file
else:
file = path.join('out', 'reviews', 'total_' + params.DAYS + '.txt')
reviews_dict = get_reviews_from_file(file)
print_patch_stats(list(filter(lambda review: review.status == 'MERGED', reviews_dict.values())),
list(filter(lambda review: review.status == 'ABANDONED', reviews_dict.values())))
# all_stats(reviews_dict)
# exit()
owners = []
for email in params.OWNERS:
owners.append(owner_stats(email, reviews_dict))
pass
reviewers = []
for email in params.REVIEWERS:
reviewers.append(reviewer_stats(email, reviews_dict))
pass
teams = []
for name, emails in params.TEAMS.items():
teams.append(team_stats(name, emails, reviews_dict))
pass
for o in owners:
o.print()
pass
for r in reviewers:
r.print()
pass
for t in teams:
t.print()
pass
if __name__ == '__main__':
main()
|
bsd-3-clause
| -9,002,832,067,118,284,000
| 32.951087
| 103
| 0.637746
| false
| 4.083007
| false
| false
| false
|
adaptivdesign/odooku-compat
|
odooku/services/wsgi/rules.py
|
1
|
2508
|
import os.path
import json
import re
from urlparse import urlparse, urlunparse
import logging
from werkzeug.wsgi import get_current_url
from werkzeug.utils import redirect
_logger = logging.getLogger(__name__)
class BadMatchPattern(Exception):
pass
def build_url_regex(pattern):
regex = "^"
# Build protocol regex
result = re.match(r'^(\*|https?):\/\/', pattern)
if not result:
raise BadMatchPattern("Invalid scheme: {}".format(pattern))
elif result.group(1) == "*":
regex += "(https?)"
else:
regex += result.group(1)
regex += ":\/\/"
pattern = pattern[len(result.group(0)):]
regex += "(.+)".join(map(re.escape, pattern.split("*")))
regex += "$"
return regex
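# Hedged examples of the pattern syntax handled above:
#   '*://example.com'        -> r'^(https?):\/\/example\.com$'
#   'https://*.example.com'  -> r'^https:\/\/(.+)\.example\.com$'
#   'example.com'            -> raises BadMatchPattern (a scheme is required)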
class Rule(object):
def __init__(self, pattern, redirect=None):
self._regex = re.compile(build_url_regex(pattern))
self._redirect = redirect
def _match_url(self, url):
parts = urlparse(url)
return self._regex.match('%s://%s' % (parts[0], parts[1]))
def match(self, environ):
url = get_current_url(environ)
return bool(self._match_url(url))
def execute(self, environ, start_response):
url = get_current_url(environ)
if self._redirect:
groups = self._match_url(url).groups()
parts = urlparse(url)
new_parts = urlparse(self._redirect.format(*groups))
response = redirect(urlunparse(new_parts[:2] + parts[2:]))
return response(environ, start_response)
class WSGIApplicationRulesWrapper(object):
DEFAULT_PATH = os.path.abspath('rules.json')
_rules = []
def __init__(self, application):
self._application = application
def __call__(self, environ, start_response):
for rule in self._rules:
if(rule.match(environ)):
result = rule.execute(environ, start_response)
if result:
return result
return self._application(environ, start_response)
@classmethod
def has_rules(cls):
return bool(cls._rules)
@classmethod
def factory(cls, rules):
return type(cls.__name__, (cls,), {
'_rules': [Rule(pattern, **options) for (pattern, options) in rules.iteritems()]
})
@classmethod
def load(cls, path=DEFAULT_PATH):
rules = {}
if os.path.isfile(path):
with open(path) as f:
rules = json.load(f)
return cls.factory(rules)
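# Hedged usage sketch. A hypothetical rules.json that redirects an old host
# (the '{0}' placeholder receives the first wildcard group, here the scheme):
#
#   {"*://old.example.com": {"redirect": "{0}://www.example.com"}}
#
# and wrapping an existing WSGI callable:
#
#   wrapper_cls = WSGIApplicationRulesWrapper.load('rules.json')
#   if wrapper_cls.has_rules():
#       application = wrapper_cls(application)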
|
apache-2.0
| -4,900,918,993,625,054,000
| 25.680851
| 92
| 0.588118
| false
| 3.949606
| false
| false
| false
|
Diacamma2/financial
|
setup.py
|
1
|
2722
|
# -*- coding: utf-8 -*-
'''
setup module to pip integration of Diacamma accounting
@author: Laurent GAY
@organization: sd-libre.fr
@contact: info@sd-libre.fr
@copyright: 2015 sd-libre.fr
@license: This file is part of Lucterios.
Lucterios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Lucterios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Lucterios. If not, see <http://www.gnu.org/licenses/>.
'''
from setuptools import setup
from diacamma.accounting import __version__
setup(
name="diacamma-financial",
version=__version__,
author="Lucterios",
author_email="info@diacamma.org",
url="http://www.diacamma.org",
description="Diacamma financial modules for Lucterios framework.",
long_description="""
Diacamma financial modules for Lucterios framework.
""",
include_package_data=True,
platforms=('Any',),
license="GNU General Public License v3",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django :: 3.0',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Natural Language :: French',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Database :: Front-Ends',
'Topic :: Office/Business :: Financial :: Accounting',
],
packages=["diacamma", "diacamma.accounting", "diacamma.invoice", "diacamma.payoff"],
package_data={
"diacamma.accounting.migrations": ['*'],
"diacamma.accounting.system": ['*', 'locale/*/*/*'],
"diacamma.accounting": ['build', 'images/*', 'locale/*/*/*', 'help/*'],
"diacamma.invoice.migrations": ['*'],
"diacamma.invoice": ['build', 'images/*', 'locale/*/*/*', 'help/*'],
"diacamma.payoff.migrations": ['*'],
"diacamma.payoff": ['build', 'images/*', 'locale/*/*/*', 'help/*'],
},
install_requires=["lucterios ~=2.5", "lucterios-contacts ~=2.5", "requests"],
)
|
gpl-3.0
| -5,390,551,900,232,317,000
| 37.885714
| 88
| 0.649155
| false
| 3.754483
| false
| false
| false
|
Quantipy/quantipy
|
quantipy/core/builds/powerpoint/pptx_painter.py
|
1
|
52718
|
# encoding: utf-8
'''
@author: Majeed.sahebzadha
'''
from __future__ import unicode_literals
import copy
import time
import numpy as np
import pandas as pd
import quantipy as qp
from os import path
from collections import OrderedDict
from pptx import Presentation
from quantipy.core.cluster import Cluster
from quantipy.core.chain import Chain
from quantipy.core.helpers.functions import(
finish_text_key,
paint_view)
from quantipy.core.builds.powerpoint.add_shapes import(
chart_selector,
add_stacked_bar_chart,
add_textbox,
add_net)
from quantipy.core.builds.powerpoint.transformations import(
is_grid_element,
get_base,
validate_cluster_orientations,
drop_hidden_codes,
partition_view_df,
strip_html_tags,
rename_label,
df_splitter,
auto_sort,
round_df_cells)
from quantipy.core.builds.powerpoint.visual_editor import(
return_slide_layout_by_name)
from pptx.enum.text import(
PP_ALIGN,
MSO_AUTO_SIZE,
MSO_ANCHOR
)
from pptx.util import(
Emu,
Pt,
Cm,
Inches
)
from quantipy.core.builds.powerpoint.add_shapes import (
percentage_of_num,
get_cht_plot_height,
get_upper_cht_plot_gap)
thisdir = path.split(__file__)[0]
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def chain_generator(cluster):
'''
Generate chains
Parameters
----------
cluster : quantipy.Cluster
quantipy cluster object
'''
for chain_name in cluster.keys():
yield cluster[chain_name]
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def chain_has_weighted_views(chain):
'''
Check if a qp.Chain contains weighted frequency views
Parameters
----------
chain : quantipy.Chain
quantipy chain object
'''
for el in chain.views:
e0, e1, e2, e3, e4, e5 = el.split('|')
if e0 == 'x' and e1 == 'f' and e3 == 'y' and e4:
return True
elif e2 == 'x++:' and e3 == 'y' and e4:
return True
return False
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def get_grid_el_label(df):
'''
Grabs a grid element level label
Parameters
----------
df : dataframe
pandas dataframe object
'''
grid_el_label = strip_html_tags(df.index[0][0])
if ' - ' in grid_el_label:
label = grid_el_label.split(' - ')[-1].strip()
elif '. ' in grid_el_label:
label = grid_el_label.split('. ',1)[-1].strip()
else:
label = 'Label missing'
return label
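# Hedged illustration: an index label such as
#   'Q1 Grid - First statement'
# yields 'First statement' (text after ' - '); a label like
#   '2. Second statement'
# yields 'Second statement'; anything else falls back to 'Label missing'.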
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def df_meta_filter(
df,
meta,
conditions,
index_key=None):
'''
Selects rows based on multiple binary conditions (True/False)
Parameters
----------
df: pandas dataframe
meta: pandas dataframe
conditions: dict or pandas.Series object
index_key: column label or list of column labels / arrays
example useage: df_meta_filter(
df,
meta,
{'is_pct': True, 'is_weighted': 'True'},
index_key='label')
resource: http://stackoverflow.com/questions/34740778/use-series-to-select-rows-from-df-pandas
http://stackoverflow.com/questions/34726569/get-subsection-of-df-based-on-multiple-conditions
'''
con = conditions.copy()
df = df.reset_index()
meta = meta.reset_index()
if not isinstance(con, pd.Series):
con = pd.Series(con)
# pull rows where all the conditions are met
# get subset of df based on labels in conditions
df = df[(meta == con)[con.index].all(axis=1)]
if not df.empty:
if not index_key:
key_names = ['Values']
else:
if not isinstance(index_key, list):
index_key = [index_key]
key_names = index_key
# replace names with labels (note in the future, use text first then labels)
if len(key_names)>1:
# use label and overlap those by text which are not empty string
idx = meta.loc[df.index]['label'].where(meta.loc[df.index]['text']=='', meta.loc[df.index]['text'].values)
else:
idx = meta.loc[df.index].set_index(key_names).index
df = df.set_index(df.columns[0])
# replace label index with name index
df.index = idx
return df
else:
# empty df
return pd.DataFrame()
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def gen_meta_df(painted_df, qp_view):
'''
Creates a df containing only metadata
Parameters
----------
painted_df: pandas dataframe
qp_view: quantipy view
'''
df_meta = partition_view_df(qp_view.dataframe)[0]
df_meta['short_name'] = qp_view.meta()['agg']['name']
df_meta['text'] = qp_view.meta()['agg']['text']
df_meta['method'] = qp_view.meta()['agg']['method']
df_meta['is_block'] = qp_view.meta()['agg']['is_block']
df_meta['is_pct'] = str(qp_view.is_pct())
df_meta['is_base'] = str(qp_view.is_base())
df_meta['is_weighted'] = str(qp_view.is_weighted())
df_meta['is_counts'] = str(qp_view.is_counts())
df_meta['is_meanstest'] = str(qp_view.is_meanstest())
df_meta['is_propstest'] = str(qp_view.is_propstest())
df_meta['is_sum'] = str(qp_view.is_sum())
df_meta['is_stat'] = str(qp_view.is_stat())
df_meta['label'] = painted_df.index
# distinguish between net and expanded
net_bools=[]
for row in df_meta.index:
if qp_view.is_net():
v_described = qp_view.describe_block()
all_normal = all(vt == 'normal' for vt in v_described.itervalues())
if not all_normal:
if row in v_described:
if v_described[row] == 'net':
net_bools.append('True')
else:
net_bools.append('False')
else:
net_bools.append('True')
else:
net_bools.append('False')
df_meta['is_net'] = net_bools
# rearrange the columns
df_meta = df_meta[['label', 'short_name', 'text', 'method', 'is_pct',
'is_net', 'is_weighted', 'is_counts', 'is_block',
'is_base', 'is_stat', 'is_sum', 'is_propstest',
'is_meanstest']]
return df_meta
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def same_labels(listofdfs):
'''
Before concatenating dfs make sure their row/index labels match.
Sometimes, a set of grid element tables belonging to the same
grid contains different views, which is not ideal.
Parameters
----------
listofdfs: list of pandas dataframes
'''
for x in range(0, len(listofdfs)):
if not all(listofdfs[0].index == listofdfs[x].index):
raise Exception('index labels mismatch')
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def same_num_of_elements(listofdfs):
'''
Counts the number of elements in each df of listofdfs and checks that they are all equal.
Parameters
----------
listofdfs: list of pandas dataframes
'''
el_len = [len(el) for el in listofdfs]
if not all(x == el_len[0] for x in el_len):
raise Exception('cannot merge elements - uneven '
'number of element views.')
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def all_same(val_array):
'''
Check if all the values in the given array are the same (after rounding)
Parameters
----------
val_array: numpy array
'''
# check if val_array is a numpy array
if type(val_array).__module__ == np.__name__:
val = val_array.tolist()
if isinstance(val[0], list):
#handle list of lists
return all(round(x[0]) == round(val[0][0]) for x in val)
else:
#handle single list
return all(round(x) == round(val[0]) for x in val)
else:
raise Exception('This function only takes a numpy array')
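# Hedged examples (comparison is on rounded values):
#   all_same(np.array([1.2, 1.4, 0.9]))  -> True   (all round to 1)
#   all_same(np.array([1.0, 2.0]))       -> False
#   all_same([1, 1])                     -> raises Exception (not a numpy array)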
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def insert_values_to_labels(
add_values_to,
take_values_from,
index_position=0):
'''
Takes two dfs and appends the values of a given row from one df
to the other df's column labels.
Parameters
----------
add_values_to: pandas dataframe
take_values_from: pandas dataframe
index_position: int, optional
'''
# check 1 - if the labels from both dfs are the same
if all(add_values_to.columns == take_values_from.columns):
# pull a given row's values
row_vals = take_values_from.ix[[index_position],:].values
# flatten the list of values
row_vals = row_vals.flatten()
# get column labels
col_labels = add_values_to.columns
# loop over and add the values to the labels
for x,y in zip(col_labels, row_vals):
col_name = x + " (n=" + str(int(round(y))) +")"
add_values_to.rename(columns={x: col_name}, inplace=True)
return add_values_to
else:
raise Exception('Cannot add values to df labels')
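# Hedged illustration: with matching columns ['Male', 'Female'] and a base
# row of [200, 210] at index_position 0, the chart df's column labels become
# 'Male (n=200)' and 'Female (n=210)'.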
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def PowerPointPainter(
path_pptx,
meta,
cluster,
path_pptx_template=None,
slide_layout='Blank',
text_key=None,
force_chart=True,
force_crossbreak=None,
base_type='weighted',
base_repr=None,
include_nets=True,
shape_properties=None,
display_var_names=True,
date_range=None,
split_busy_dfs=False,
verbose=True):
'''
Builds PowerPoint file (PPTX) from cluster, list of clusters, or
dictionary of clusters.
Parameters
----------
path_pptx : str
PowerPoint file path
meta : dict
metadata as dictionary used to paint datframes
cluster : quantipy.Cluster / list / dict
container for cluster(s)
path_pptx_template : str, optional
full path to PowerPoint template
slide_layout : str / int, optional
valid slide layout name or index
text_key : str, optional
language
force_chart : boolean, optional
use default settings to produce a PowerPoint file
force_crossbreak : str / list, optional
use given crossbreaks to build a PowerPoint file
base_type : str, optional
use weighted or unweighted base
include_nets : str / boolean
True/False: include/exclude net views in chart data
'partly': include nets in chart data except for array summary charts
shape_properties : dict, optional
keys as format properties, values as change from default
display_var_names : boolean
if True, variable names are appended to question labels
split_busy_dfs : boolean
if True, spreads busy dataframes evenly across multiple slides
'''
if verbose:
print(
'\n{ast}\n{ast}\n{ast}\nINITIALIZING POWERPOINT '
'AUTOMATION SCRIPT...'.format(ast='*' * 80))
# check path extension
if path_pptx.endswith('.pptx'):
path_pptx = path_pptx[:-5]
elif path_pptx.endswith('/') or path_pptx.endswith('\\'):
raise Exception('File name not provided')
# check base type string
base_type = base_type.lower()
# render cluster
names = []
clusters = []
if isinstance(cluster, Cluster):
names.append(cluster.name)
clusters.append(cluster)
elif isinstance(cluster, list):
for c in cluster:
names.append(c.name)
clusters.append(c)
elif isinstance(cluster, dict):
names_clusters_dict = cluster
for sheet_name, c in cluster.iteritems():
names.append(sheet_name)
clusters.append(c)
# default settings
default_props = {
'crossbreak': ['@'],
'chart_type': 'bar',
'sort_order': 'none',
'chart_color': 'green',
'fixed_categories': [],
'base_description': '',
'chart_layout': '1',
'slide_title_text': 'Click to add title',
'question_label': 'Unknown',
'copied_from': '',
'center_header': '',
'right_footer': '',
'title_footer': ''}
spec = meta['sets'].get('spec', False)
# update 'crossbreak' key's value in default_props if
# force_crossbreak parameter is true
if force_crossbreak:
if isinstance(force_crossbreak, list):
pass
elif isinstance(force_crossbreak, str):
force_crossbreak = [force_crossbreak]
for c in force_crossbreak:
default_props['crossbreak'].append(c)
if not path_pptx_template:
        path_pptx_template = path.join(
            thisdir,
            'templates',
            'default_template.pptx')
# get the default text key if none provided
if text_key is None:
text_key = finish_text_key(meta, text_key)
# default shape properties (minimum level, only shape dimensions)
# if none provided
if shape_properties is None:
shape_properties = {
'header_shape': {
'left': 284400,
'top': 1007999,
'width': 8582400,
'height': 468000},
'chart_shape': {
'bar': {
'left': 284400,
'top': 1475999,
'width': 8582400,
'height': 4140000},
'stacked_bar': {
'left': 284400,
'top': 1475999,
'width': 8582400,
'height': 4140000},
'column': {
'left': 284400,
'top': 1475999,
'width': 8582400,
'height': 4140000},
'pie': {
'left': 284400,
'top': 1475999,
'width': 8582400,
'height': 4140000},
'line': {
'left': 284400,
'top': 1475999,
'width': 8582400,
'height': 4140000}},
'footer_shape': {
'left': 284400,
'top': 5652000,
'width': 8582400,
'height': 396000}}
############################################################################
############################################################################
############################################################################
# loop over clusters, returns pptx for each cluster
for cluster_name, cluster in zip(names, clusters):
if verbose:
            print(
                '\nPowerPoint minions are building your PPTX, '
                'please stand by...\n\n{indent:>2}Building '
                'PPTX for {file_name}'.format(
                    indent='',
                    file_name=cluster_name))
# log start time
pptx_start_time = time.time()
# check if cluster is empty
if not cluster:
raise Exception("'{}' cluster is empty".format(cluster_name))
# ensure all chains in cluster have the same orientation
validate_cluster_orientations(cluster)
# pull orientation of chains in cluster
orientation = cluster[cluster.keys()[0]].orientation
# open pptx template file
prs = Presentation(path_pptx_template)
# log slide number
slide_num = len(prs.slides)
# Get Client and Topic tag from meta
if isinstance(spec, dict):
topic = u"{}".format(spec.get('topic', ""))
client = u"{}".format(spec.get('name', ""))
else:
topic = ""
client = ""
############################################################################
# frontpage title ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
############################################################################
title_shape=shape_properties['title_shape']
client_date_shape=shape_properties['client_date_shape']
if title_shape['addtext_frontpage']:
for shape in prs.slides[0].shapes:
if shape.name == title_shape['shape_name_frontpage']:
shape.text = topic
if client_date_shape['addtext']:
for shape in prs.slides[0].shapes:
if shape.name == title_shape['shape_name']:
shape.text = client_date_shape['t_d_text'].format(client,date_range)
############################################################################
# X ORIENTATION CODE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
############################################################################
if orientation == 'x':
# grid element storage dict
grid_container = []
# translated views contains names of all views
# which have been translated
translated_views = []
            # This section tries to find, pull and build grid element
# dataframes by matching the downbreak name against the grid element name.
# Each downbreak is therefore checked against all keys in masks.
for chain in chain_generator(cluster):
# list of crossbreak name
crossbreaks = chain.content_of_axis
# single downbreak name
downbreak = chain.source_name
'----CHART AND BASE DATA CONDITIONS ----------------------------'
# table selection conditions for chart shape
chartdata_conditions = OrderedDict([
('is_pct', 'True'),
('is_weighted', 'True'),
('is_sum', 'False')])
# Net settings
net_setup = shape_properties.get('net_setup', False)
if not include_nets:
chartdata_conditions.update({'is_net': 'False'})
chartdata_conditions_grid = copy.deepcopy(chartdata_conditions)
elif include_nets == True:
if net_setup:
chartdata_conditions.update({'is_net': 'False'})
chartdata_conditions_grid = copy.deepcopy(chartdata_conditions)
else:
chartdata_conditions_grid = copy.deepcopy(chartdata_conditions)
#elif include_net == 'partly':
else:
chartdata_conditions_grid = copy.deepcopy(chartdata_conditions)
chartdata_conditions_grid.update({'is_net': 'False'})
#---------------------------------------------------------------
# table selection conditions for footer/base shape
base_conditions = OrderedDict([
('is_base', 'True'),
('short_name', 'cbase'),
('is_weighted', 'True' if base_type == 'weighted' else 'False')])
'----PULL METADATA DETAILS -------------------------------------'
                # for each downbreak, try and pull its meta
if force_chart:
meta_props = []
else:
if downbreak in meta['columns']:
if 'properties' in meta['columns'][downbreak]:
meta_props = meta['columns'][downbreak]['properties']
else:
meta_props = []
else:
meta_props = []
if text_key['x'] in meta['columns'][downbreak]['text'].keys():
question_label = meta['columns'][downbreak]['text'][text_key['x']]
else:
question_label = meta['columns'][downbreak]['text'].values()[0]
chart_type = meta_props['chart_type'] if 'chart_type' in meta_props else default_props['chart_type']
layout_type = meta_props['chart_layout'] if 'chart_layout' in meta_props else default_props['chart_layout']
sort_order = meta_props['sort_order'] if 'sort_order' in meta_props else default_props['sort_order']
fixed_categories = meta_props['fixed_categories'] if 'fixed_categories' in meta_props else default_props['fixed_categories']
if fixed_categories:
fixed_values = map(lambda x: int(x['value']), fixed_categories)
values = loc_values = meta['columns'][downbreak]['values']
if isinstance(loc_values, (str, unicode)):
loc_values = loc_values.split('@')
values = meta[loc_values.pop(0)]
while loc_values:
values = values[loc_values.pop(0)]
fixed_categories = [
item['text'][text_key['x']]
for item in values
if item['value'] in fixed_values
]
slide_title_text = meta_props['slide_title'] if 'slide_title' in meta_props else default_props['slide_title_text']
copied_from = meta_props['copied_from'] if 'copied_from' in meta_props else default_props['copied_from']
base_description = meta_props['base_text'] if 'base_text' in meta_props else default_props['base_description']
'----IF GRID THEN-----------------------------------------------'
# loop over items in masks
for grid in meta['masks']:
for x in range(0, len(meta['masks'][grid]['items'])):
gridname = meta['masks'][grid]['items'][x]['source'].split('columns@')[-1]
if downbreak == gridname:
if text_key['x'] in meta['masks'][grid]['text'].keys():
grid_question_label = meta['masks'][grid]['text'][text_key['x']]
else:
grid_question_label = meta['masks'][grid]['text'].values()[0]
# check if grid is in grid container, if it's not then continue
if not grid in grid_container:
grid_container += [grid]
remaining_elements = [
grid_element['source'].split('@')[1]
for grid_element in meta['masks'][grid]['items'][0:]]
'----GROUP GRID-CHAIN VIEWS-------------------------------------'
grouped_grid_views = []
for grid_element_name in remaining_elements:
grid_chain = cluster[grid_element_name]
#prepare grid element labels
grid_el_label = meta['columns'][grid_element_name]['text'][text_key['x']]
if grid_el_label.startswith(grid_question_label):
grid_el_label = grid_el_label.split(grid_question_label)[-1].strip()
if grid_el_label.startswith('- '):
grid_el_label = grid_el_label[2:]
# use weighted freq views if available
has_weighted_views = chain_has_weighted_views(grid_chain)
#if the conditions for base and chartdata's "is_weighted" key
#is True but there are no weighted views in the chain then use
#unweighted views
if not has_weighted_views:
if chartdata_conditions['is_weighted']=='True':
chartdata_conditions['is_weighted'] = 'False'
chartdata_conditions_grid['is_weighted'] = 'False'
#an unweighted chart can only have unweighted base
if base_conditions['is_weighted']=='True':
base_conditions['is_weighted'] = 'False'
views_on_chain = []
meta_on_g_chain = []
# loop over views in chain
for v in grid_chain.views:
dk = grid_chain.data_key
fk = grid_chain.filter
# only pull '@' based views as these will be concatenated together
view = grid_chain[dk][fk][grid_element_name]['@'][v]
view.translate_metric(
text_key['x'][0],
set_value='meta')
trans_var_name = '{}x@'.format(grid_chain.name)
if not trans_var_name in translated_views:
translated_views.append(trans_var_name)
# paint view
df = paint_view(meta, view)
# flatten df
df = partition_view_df(df)[0]
# get meta data
df_meta = gen_meta_df(df, view)
# append
meta_on_g_chain.append(df_meta)
views_on_chain.append(df)
# this var will be overwritten but its okay for now.
grped_g_meta = pd.concat(
meta_on_g_chain,
axis=0)
# concat all the views together on a single chain
mdf = pd.concat(views_on_chain, axis=0)
mdf.rename(
columns={mdf.columns[0]: grid_el_label},
inplace=True)
grouped_grid_views.append(mdf)
'----CONCAT AND PREPARE GRID-CHAIN VIEWS------------------------'
# before merging all grid elements together, 2 checks are carried out:
# 1. ensure all grid elements have the same number of views
same_num_of_elements(grouped_grid_views)
                                # 2. ensure all the grid index labels are the same
same_labels(grouped_grid_views)
# concat all grid chains in grouped_grid_views together
merged_grid_df = pd.concat(grouped_grid_views, axis=1)
merged_grid_df = merged_grid_df.fillna(0.0)
slide_num += 1
if verbose:
print(
'\n{indent:>5}Slide {num}. '
'Adding a 100% STACKED BAR CHART '
'for {qname} cut by '
'Total{war_msg}'.format(
indent='',
num=slide_num,
qname=grid,
war_msg=''))
#extract df for net
if net_setup:
net_setup_stacked_bar = net_setup.get('stacked_bar', False)
if net_setup_stacked_bar:
df_grid_table_net = df_meta_filter(
merged_grid_df,
grped_g_meta,
{'is_net' : 'True'},
index_key='label')
#extract df for chart
df_grid_table = df_meta_filter(
merged_grid_df,
grped_g_meta,
chartdata_conditions_grid,
index_key='label')
#extract df for base
df_grid_base = df_meta_filter(
merged_grid_df,
grped_g_meta,
base_conditions,
index_key='text')
if not df_grid_table.empty:
# if not all the values in the grid's df are the same
# then add the values to the grids column labels
if not all_same(df_grid_base.values[0]):
df_grid_table = insert_values_to_labels(
df_grid_table,
df_grid_base,
index_position=0)
if base_description:
#remove the word "Base:" from the description
description = base_description.split(': ')[-1]
#grab the label for base from the df
base_label = df_grid_base.index[0]
#put them together
base_text = '{}: {}'.format(
base_label,
description)
else:
base_text = ''
else:
base_text = get_base(
df_grid_base,
base_description,
True)
if base_repr and ('Base' in base_text):
base_text = base_text.replace('Base', base_repr)
# get question label
if display_var_names:
if shape_properties['short_grid_name']:
grid_label = grid.partition('.')[0]
else:
grid_label = grid
grid_question_label = '{}. {}'.format(
grid_label,
strip_html_tags(grid_question_label))
# format table values
df_grid_table = np.round(df_grid_table/100, 4)
'----ADDPEND SLIDE TO PRES--------------------------------------'
if isinstance(slide_layout, int):
slide_layout_obj = prs.slide_layouts[slide_layout]
else:
slide_layout_obj = return_slide_layout_by_name(
prs,
slide_layout)
slide = prs.slides.add_slide(slide_layout_obj)
'----ADD SHAPES TO SLIDE----------------------------------------'
''' title shape'''
if title_shape['addtext']:
for shape in slide.placeholders:
if shape.name == title_shape['shape_name']:
shape.text = topic
''' header shape '''
sub_title_shp = add_textbox(
slide,
text=grid_question_label,
**(shape_properties['header_shape']
if shape_properties else {}))
''' net table '''
if include_nets and net_setup:
save_width = shape_properties['chart_shape']['stacked_bar']['width']
if net_setup_stacked_bar['show_table']:
if not df_grid_table_net.empty:
df_grid_table_net = round_df_cells(df_grid_table_net,
net_setup_stacked_bar['table_decimals'])
if net_setup_stacked_bar['add_percent_sign']:
df_grid_table_net = df_grid_table_net.astype(str) + '%'
cols = len(df_grid_table_net.T.columns)
shapes=shape_properties['chart_shape']['stacked_bar']
shapes['legend_position']='bottom'
shapes['width'] -= net_setup_stacked_bar['table_column_width'] * cols
# Set net table size and position
height = shapes['height']
top = shapes['top']
left = shapes['left'] + shapes['width']
width = net_setup_stacked_bar['table_column_width']
net_table = add_net(slide, df_grid_table_net.T, height=height, width=width, top=top, left=left)
''' chart shape '''
chart_shp = chart_selector(
slide,
df_grid_table,
chart_type='stacked_bar',
**(shape_properties['chart_shape']['stacked_bar']
if shape_properties else {}))
if include_nets and net_setup:
shape_properties['chart_shape']['stacked_bar']['width'] = save_width
''' footer shape '''
if base_text:
base_text_shp = add_textbox(
slide,
text=base_text,
**(shape_properties['footer_shape']
if shape_properties else {}))
'----IF NOT GRID THEN-------------------------------------------'
if 'crossbreak' in meta_props:
if meta_props['crossbreak'] != '@':
target_crossbreaks = default_props['crossbreak'] + meta_props['crossbreak'].split(',')
else:
target_crossbreaks = meta_props['crossbreak'].split(',')
else:
target_crossbreaks = default_props['crossbreak']
for crossbreak in crossbreaks:
if crossbreak in target_crossbreaks:
'----GROUP NON GRID-CHAIN VIEWS---------------------------------'
# are there any weighted views in this chain?
has_weighted_views = chain_has_weighted_views(chain)
# if "is_weighted" is True but there's no weighted views
# use unweighted views
if not has_weighted_views:
if chartdata_conditions['is_weighted']=='True':
chartdata_conditions['is_weighted'] = 'False'
# an unweighted chart can only have unweighted base
if base_conditions['is_weighted'] == 'True':
base_conditions['is_weighted'] = 'False'
views_on_chain = []
meta_on_chain = []
for v in chain.views:
dk = chain.data_key
fk = chain.filter
view = chain[dk][fk][downbreak][crossbreak][v]
trans_var_name = '{}x{}'.format(
downbreak,
crossbreak)
if trans_var_name not in translated_views:
view.translate_metric(
text_key['x'][0],
set_value='meta')
# paint view
df = paint_view(meta, view)
# flatten df
df = partition_view_df(df)[0]
# get meta data
df_meta = gen_meta_df(df, view)
# append to vars
meta_on_chain.append(df_meta)
views_on_chain.append(df)
'----CONCAT AND PREPARE NON GRID-CHAIN VIEWS--------------------'
grped_meta = pd.concat(meta_on_chain, axis=0)
grped_df = pd.concat(views_on_chain, axis=0)
grped_df = grped_df.fillna(0.0)
# replace '@' with 'Total'
grped_df = rename_label(
grped_df,
'@',
'Total',
orientation='Top')
# extract df for net
if net_setup:
df_table_net = df_meta_filter(
grped_df,
grped_meta,
{'is_net': 'True'},
index_key='label')
# standardise table values
df_table_net = np.round(df_table_net.fillna(0.0) / 100, 4)
#extract df for chart
df_table = df_meta_filter(
grped_df,
grped_meta,
chartdata_conditions,
index_key='label')
#extract df for base
df_base = df_meta_filter(
grped_df,
grped_meta,
base_conditions,
index_key='text')
if not df_table.empty:
# append nets to fixed categories
for x, item in enumerate(grped_meta['is_net'].tolist()):
if item == 'True':
                                    if fixed_categories:
fixed_categories.append(grped_meta['label'].tolist()[x])
else:
fixed_categories = [grped_meta['label'].tolist()[x]]
# sort df whilst excluding fixed cats
if sort_order == 'ascending':
df_table = auto_sort(
df=df_table,
fixed_categories=fixed_categories,
column_position=0,
ascend=True)
elif sort_order == 'descending':
df_table = auto_sort(
df=df_table,
fixed_categories=fixed_categories,
column_position=0,
ascend=False)
# if not all the values in the grid's df are the same
# then add the values to the grids column labels
if not all_same(df_base.values):
df_table = insert_values_to_labels(
df_table,
df_base,
index_position=0)
base_text = base_description
else:
if not df_base.empty:
base_text = get_base(
df_base,
base_description,
False)
else:
raise Exception('Base dataframe empty for "{}".'.format(downbreak))
if base_repr and ('Base' in base_text):
base_text = base_text.replace('Base', base_repr)
# standardise table values
df_table = np.round(df_table.fillna(0.0)/100, 4)
# get question label
if display_var_names:
if shape_properties['short_grid_name'] and '_grid' in downbreak:
downbreak_label = downbreak.partition('{')[2].partition('}')[0]
else:
downbreak_label = downbreak
question_label = '{}. {}'.format(
downbreak_label,
strip_html_tags(question_label))
# handle incorrect chart type assignment
if len(df_table.index) > 15 and chart_type == 'pie':
chart_type='bar'
'----SPLIT DFS & LOOP OVER THEM---------------------------------'
if split_busy_dfs:
# split large dataframes
collection_of_dfs = df_splitter(
df_table,
min_rows=5,
max_rows=15)
else:
# dont split large/busy dataframes
collection_of_dfs = [df_table]
for i, df_table_slice in enumerate(collection_of_dfs):
'----ADDPEND SLIDE TO PRES--------------------------------------'
if isinstance(slide_layout, int):
slide_layout_obj = prs.slide_layouts[slide_layout]
else:
slide_layout_obj = return_slide_layout_by_name(
prs,
slide_layout)
slide = prs.slides.add_slide(slide_layout_obj)
'----ADD SHAPES TO SLIDE----------------------------------------'
''' title shape'''
if title_shape['addtext']:
for shape in slide.placeholders:
if shape.name == title_shape['shape_name']:
shape.text = topic
''' title shape '''
if i > 0:
cont_question_label = '{} (continued {})'.format(
question_label,
i+1)
else:
cont_question_label = question_label
''' header shape '''
sub_title_shp = add_textbox(
slide,
text=cont_question_label,
**(shape_properties['header_shape']
if shape_properties else {}))
''' chart shape '''
numofcols = len(df_table_slice.columns)
numofrows = len(df_table_slice.index)
# handle incorrect chart type assignment
if chart_type == 'pie' and numofcols > 1:
chart_type = 'bar'
                                # turn legend off if table contains 1 series unless it's a pie chart
if numofcols == 1:
legend_switch = False
if chart_type == 'pie':
legend_switch = True
else:
legend_switch = True
if 'has_legend' in shape_properties['chart_shape'][chart_type]:
shape_properties['chart_shape'][chart_type]['has_legend'] = legend_switch
# Net settings
if include_nets and net_setup:
net_setup = net_setup.get(chart_type, False)
if not net_setup == False and net_setup['show_nets']:
if len(collection_of_dfs) == 1:
if not df_table_net.empty:
if net_setup['separator']:
df_table_slice = df_table_slice.T
df_table_slice.insert(len(df_table_slice.columns), 'net_separator', 1.01)
df_table_slice = df_table_slice.T
#df_table_slice.loc[len(df_table_slice)]=0
df_table_slice = pd.concat([df_table_slice, df_table_net])
shape_properties['chart_shape']['bar']['separator_color'] = net_setup['separator_color']
chart = chart_selector(
slide,
df_table_slice,
chart_type=chart_type,
**(shape_properties['chart_shape'][chart_type]
if shape_properties else {}))
''' footer shape '''
base_text_shp = add_textbox(
slide,
text=base_text,
**(shape_properties['footer_shape']
if shape_properties else {}))
slide_num += 1
if verbose:
print(
'\n{indent:>5}Slide {slide_number}. '
'Adding a {chart_name} '
'CHART for {question_name} '
'cut by {crossbreak_name} '
'{x}'.format(
indent='',
slide_number=slide_num,
chart_name=chart_type.upper().strip(),
question_name=downbreak,
crossbreak_name='Total' if crossbreak == '@' else crossbreak,
x='(cont ('+str(i)+'))' if i > 0 else ''))
else:
if verbose:
print(
'\n{indent:>5}***Skipping {question_name}, '
'no views match your conditions: '
'{conditions}'.format(
indent='',
question_name=downbreak,
conditions=chartdata_conditions))
prs.save('{}.pptx'.format(path_pptx))
print 'Created: {}.pptx\n'.format(path_pptx)
############################################################################
# Y ORIENTATION CODE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
############################################################################
if orientation == 'y':
            # raise an error if the cluster is y orientated
raise TypeError('y orientation not supported yet')
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
if verbose:
pptx_elapsed_time = time.time() - pptx_start_time
print(
'\n{indent:>2}Presentation saved, '
'time elapsed: {time:.2f} seconds\n'
'\n{line}'.format(
indent='',
time=pptx_elapsed_time,
line= '_' * 80))
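# Illustrative call (a sketch; `meta` and `cluster` are assumed to be an
# already-loaded Quantipy meta dictionary and a painted Cluster object):
#     PowerPointPainter('output/report', meta, cluster,
#                       slide_layout='Blank', base_type='weighted',
#                       include_nets=True, split_busy_dfs=True)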
|
mit
| -3,561,614,369,308,608,000
| 43.263644
| 187
| 0.393509
| false
| 5.346111
| false
| false
| false
|
Spurlock/loveletter
|
engine.py
|
1
|
14563
|
"""
NOTES AND DEFINITIONS
round: each time an affection token is given, one round has ended
game: each time a player reaches AFFECTION_GOAL, one game has ended
match: a set of games, ending at a given number of wins
deck = [CARD_RANK, CARD_RANK, ..., CARD_RANK] # burner is not separate, is just the last card
player_action = {
card: CARD_RANK,
?target_player: player_idx,
?guess: CARD_RANK
}
game_history = [
    {
        player_idx: INT,
        action: player_action,
        ?eliminated_player: INT
    }
]
"""
from random import shuffle, randint
from copy import copy
from itertools import permutations
import sys
from bots.IdiotBot import IdiotBot
# from bots.MarkBot import MarkBot
# from bots.BenBot1 import BenBot
# from bots.BsonBot import BsonBot
from common import (full_deck, get_card_name,
GUARD, PRIEST, BARON, HANDMAID, PRINCE, KING, COUNTESS, PRINCESS, SUICIDE,
AFFECTION_GOAL, mprint)
class GameState(object):
def __init__(self, players, affections):
self.players = players
player_states = []
for player_idx, player in enumerate(players):
player_states.append(PlayerState(player_idx, player, affections[player_idx]))
game_deck = [card for card in full_deck()]
shuffle(game_deck)
self.deck = game_deck
self.player_states = player_states
self.history = []
self.turn_record = None
self.current_player_idx = -1 # TO DO: respect last winner
def __str__(self):
player_descriptions = "\r\n".join([player.short_description() for player in self.player_states])
return """GAME STATE:
%s
deck: %r,
current player idx: %d
""" % (player_descriptions, self.deck, self.current_player_idx)
def deal_card(self, player_idx):
card = self.deck.pop(0)
self.player_states[player_idx].hand.append(card)
def advance_current_player(self):
self.current_player_idx = (self.current_player_idx + 1) % len(self.players)
while not self.player_states[self.current_player_idx].is_alive:
self.current_player_idx = (self.current_player_idx + 1) % len(self.players)
def eliminate_player(self, player_idx, reason=None):
mprint("Eliminating player %d" % player_idx, 4)
if reason:
mprint("Reason: %s" % reason, 4)
mprint(lvl=4)
self.turn_record['eliminated_player'] = player_idx
player_state = self.player_states[player_idx]
player_state.is_alive = False
player_state.graveyard.extend(player_state.hand)
player_state.hand = []
def get_winner(self):
remaining_players = [idx for idx, player_state in enumerate(self.player_states) if player_state.is_alive]
if len(remaining_players) == 0:
sys.exit("Everyone was eliminated. This is not supposed to happen.")
elif len(remaining_players) == 1:
return remaining_players[0]
elif len(self.deck) < 2:
player_states = {player_idx: self.player_states[player_idx] for player_idx in remaining_players}
high_card = max([player_state.hand[0] for _, player_state in player_states.iteritems()])
top_players = [player_idx for player_idx, player_state in player_states.iteritems() if player_state.hand[0] == high_card]
if len(top_players) == 1:
return top_players[0]
else:
winning_player = None
max_graveyard_score = -1
for player_idx in top_players:
graveyard_score = sum(player_states[player_idx].graveyard)
if graveyard_score > max_graveyard_score:
winning_player = player_idx
max_graveyard_score = graveyard_score
return winning_player
return None
def get_available_targets(self):
available_targets = []
for idx, p_state in enumerate(self.player_states):
if idx != self.current_player_idx and p_state.is_alive and not p_state.handmaided:
available_targets.append(idx)
return available_targets
def sanitize_action(self, player_action):
if 'card' not in player_action:
player_action['card'] = None
played_card = player_action['card']
target = player_action.get('target_player')
available_targets = self.get_available_targets()
if played_card != GUARD:
player_action['guess'] = None
if target is not None:
if played_card not in [GUARD, PRIEST, BARON, PRINCE, KING]:
player_action['target_player'] = None
if len(available_targets) == 0 and played_card != PRINCE:
player_action['target_player'] = None
return player_action
def get_action_error(self, player_action):
def target_is_valid():
if len(available_targets) > 0:
if target not in available_targets:
return False
elif not isinstance(target, int) and target is not None:
return False
if target == self.current_player_idx:
return False
return True
current_player_state = self.player_states[self.current_player_idx]
played_card = player_action['card']
target = player_action.get('target_player')
guess = player_action.get('guess')
available_targets = self.get_available_targets()
# is choice of card valid?
if played_card not in current_player_state.hand:
return "played card not in hand"
if played_card == GUARD:
if not target_is_valid():
return "invalid guard target"
if len(available_targets) > 0 and (not isinstance(guess, int) or guess < 2 or guess > 8):
return "invalid guard guess"
elif played_card in [PRIEST, BARON, KING]:
if not target_is_valid():
return "invalid target"
elif played_card == PRINCE:
if not target_is_valid() and target != self.current_player_idx:
return "invalid prince target"
if played_card in [PRINCE, KING] and COUNTESS in current_player_state.hand:
return "countess cheating"
return None
class PublicGameState(object):
def __init__(self, game_state):
self.player_states = [PublicPlayerState(p) for p in game_state.player_states]
self.cards_remaining = len(game_state.deck)
self.history = [copy(record) for record in game_state.history]
self.current_player_idx = game_state.current_player_idx
def __str__(self):
players = "\r\n".join([player.short_description() for player in self.player_states])
return """
GAME STATE:
%s
cards remaining: %d,
current player idx: %d,
history: %r
""" % (players, self.cards_remaining, self.current_player_idx, self.history)
class PlayerState(object):
def __init__(self, idx, player, affection):
self.my_idx = idx
self.name = player.name
self.graveyard = []
self.is_alive = True
self.affection = affection
self.hand = []
self.handmaided = False
def __str__(self):
return """
P%d %s
hand: %r
is_alive: %r
handmaided: %r
graveyard: %r
affection: %d
""" % (self.my_idx, self.name, self.hand, self.is_alive, self.handmaided, self.graveyard, self.affection)
def short_description(self):
alive = "alive" if self.is_alive else "dead"
handmaided = "handmaided, " if self.handmaided else ""
affection = "<3" * self.affection
return "P%d (%s): %s, %s%r %s" % (self.my_idx, self.name, alive, handmaided, self.hand, affection)
class PublicPlayerState(object):
def __init__(self, player_state):
self.player_idx = player_state.my_idx
self.graveyard = player_state.graveyard
self.is_alive = player_state.is_alive
self.affection = player_state.affection
self.handmaided = player_state.handmaided
def __str__(self):
return """
P%d
is_alive: %r
handmaided: %r
graveyard: %r
affection: %d
""" % (self.player_idx, self.is_alive, self.handmaided, self.graveyard, self.affection)
def short_description(self):
alive = "alive" if self.is_alive else "dead"
handmaided = ", handmaided" if self.handmaided else ""
return "P%d: %s%s" % (self.player_idx, alive, handmaided)
def describe_action(action, player_idx):
targeting = ""
guessing = ""
if action.get('target_player') is not None:
targeting = " Target: P%d." % action['target_player']
if action.get('guess') is not None:
guessing = " Guess: %s." % get_card_name(action['guess'])
return "ACTION: P%d plays %s.%s%s" % (player_idx, get_card_name(action['card']), targeting, guessing)
def play_round(players, affections, starting_player=None):
if starting_player is None:
starting_player = randint(0, len(players) - 1)
starting_player -= 1 # it's gonna be incremented anyway
mprint("BEGINNING ROUND", 4)
mprint(lvl=4)
game_state = GameState(players, affections)
for player_idx, _ in enumerate(players):
game_state.deal_card(player_idx)
winner = None
# play a round
while winner is None:
# whose turn is it?
game_state.advance_current_player()
current_player_idx = game_state.current_player_idx
current_player = players[current_player_idx]
current_player_state = game_state.player_states[current_player_idx]
# every turn housekeeping
current_player_state.handmaided = False
game_state.turn_record = {}
game_state.deal_card(current_player_idx)
public_game_state = PublicGameState(game_state)
mprint(game_state, 4)
player_action = current_player.play_turn(current_player_state.hand, public_game_state)
player_action = game_state.sanitize_action(player_action)
mprint(describe_action(player_action, current_player_idx), 5)
mprint(lvl=5)
action_error = game_state.get_action_error(player_action)
if action_error is not None:
game_state.eliminate_player(current_player_idx, action_error)
game_state.turn_record = {
'player_idx': current_player_idx,
'action': {'card': SUICIDE},
'eliminated_player': current_player_idx
}
else: # valid move, carry on
played_card = player_action['card']
target = player_action.get('target_player')
guess = player_action.get('guess')
target_player_state = game_state.player_states[target] if target is not None else None
game_state.turn_record = {
'player_idx': current_player_idx,
'action': player_action,
'eliminated_player': None
}
current_player_state.hand.remove(played_card)
current_player_state.graveyard.append(played_card)
if played_card == GUARD:
if target is not None:
if guess in target_player_state.hand:
game_state.eliminate_player(target, "guessed by guard")
elif played_card == PRIEST:
if target is not None:
current_player.learn(target, target_player_state.hand, len(game_state.history))
elif played_card == BARON:
if target is not None:
my_card = current_player_state.hand[0]
their_card = target_player_state.hand[0]
if my_card != their_card:
loser = target if my_card > their_card else current_player_idx
game_state.eliminate_player(loser, "outranked in baron-off")
elif played_card == HANDMAID:
current_player_state.handmaided = True
elif played_card == PRINCE:
discarded = target_player_state.hand.pop(0)
target_player_state.graveyard.append(discarded)
if discarded == PRINCESS:
game_state.eliminate_player(target, "discarded princess")
else:
game_state.deal_card(target)
elif played_card == KING:
if target is not None:
my_card = current_player_state.hand.pop()
current_player_state.hand.append(target_player_state.hand.pop())
target_player_state.hand.append(my_card)
elif played_card == COUNTESS:
pass
elif played_card == PRINCESS:
game_state.eliminate_player(current_player_idx, "played princess")
# update history
game_state.history.append(game_state.turn_record)
# check for winner
winner = game_state.get_winner()
mprint("Round over. Winner: Player %d" % winner, 3)
mprint(lvl=3)
return winner
def play_game(players):
mprint("BEGINING GAME", 2)
mprint(lvl=2)
for p in players:
p.reset()
affections = [0 for _ in players]
winner = None
while max(affections) < AFFECTION_GOAL:
winner = play_round(players, affections, winner)
affections[winner] += 1
mprint("END OF GAME", 2)
mprint("Final affection scores:", 2)
mprint(affections, 2)
return affections.index(AFFECTION_GOAL)
def play_match(players, num_games):
wins = [0 for _ in players]
for _ in xrange(num_games):
winner = play_game(players)
wins[winner] += 1
return wins
def play_tournament(games_per_match):
player_match_wins = {cls_idx: 0 for cls_idx, _ in enumerate(PLAYER_CLASSES)}
for player_arrangement in permutations(range(len(PLAYER_CLASSES)), 4):
players = [PLAYER_CLASSES[cls_idx](position) for position, cls_idx in enumerate(player_arrangement)]
match_results = play_match(players, games_per_match)
mprint("END OF MATCH", 1)
mprint("Games won:", 1)
mprint(match_results, 1)
for match_idx, wins in enumerate(match_results):
player_match_wins[player_arrangement[match_idx]] += wins
return player_match_wins
PLAYER_CLASSES = [IdiotBot, IdiotBot, IdiotBot, IdiotBot]
tourney_results = play_tournament(games_per_match=5)
mprint("END OF TOURNAMENT", 1)
mprint("Results:", 1)
mprint(tourney_results, 1)
|
mit
| -6,736,501,290,146,427,000
| 33.265882
| 133
| 0.606468
| false
| 3.589598
| false
| false
| false
|
netsamir/dotfiles
|
files/vim/bundle/YouCompleteMe/third_party/ycmd/ycmd/__main__.py
|
1
|
5997
|
# Copyright (C) 2013 Google Inc.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
# Other imports from `future` must be placed after SetUpPythonPath.
import sys
import os
sys.path.insert( 0, os.path.dirname( os.path.abspath( __file__ ) ) )
from server_utils import SetUpPythonPath, CompatibleWithCurrentCore
SetUpPythonPath()
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
import sys
import logging
import json
import argparse
import waitress
import signal
import os
import base64
from ycmd import user_options_store
from ycmd import extra_conf_store
from ycmd import utils
from ycmd.watchdog_plugin import WatchdogPlugin
from ycmd.hmac_plugin import HmacPlugin
from ycmd.utils import ToBytes, ReadFile, OpenForStdHandle
def YcmCoreSanityCheck():
if 'ycm_core' in sys.modules:
raise RuntimeError( 'ycm_core already imported, ycmd has a bug!' )
# We manually call sys.exit() on SIGTERM and SIGINT so that atexit handlers are
# properly executed.
def SetUpSignalHandler( stdout, stderr, keep_logfiles ):
def SignalHandler( signum, frame ):
# We reset stderr & stdout, just in case something tries to use them
if stderr:
tmp = sys.stderr
sys.stderr = sys.__stderr__
tmp.close()
if stdout:
tmp = sys.stdout
sys.stdout = sys.__stdout__
tmp.close()
if not keep_logfiles:
if stderr:
utils.RemoveIfExists( stderr )
if stdout:
utils.RemoveIfExists( stdout )
sys.exit()
for sig in [ signal.SIGTERM,
signal.SIGINT ]:
signal.signal( sig, SignalHandler )
def PossiblyDetachFromTerminal():
# If not on windows, detach from controlling terminal to prevent
# SIGINT from killing us.
if not utils.OnWindows():
try:
os.setsid()
# setsid() can fail if the user started ycmd directly from a shell.
except OSError:
pass
def ParseArguments():
parser = argparse.ArgumentParser()
# Not using 'localhost' on purpose; see #987 and #1130
parser.add_argument( '--host', type = str, default = '127.0.0.1',
help = 'server hostname')
# Default of 0 will make the OS pick a free port for us
parser.add_argument( '--port', type = int, default = 0,
help = 'server port')
parser.add_argument( '--log', type = str, default = 'info',
help = 'log level, one of '
'[debug|info|warning|error|critical]' )
parser.add_argument( '--idle_suicide_seconds', type = int, default = 0,
help = 'num idle seconds before server shuts down')
parser.add_argument( '--options_file', type = str, required = True,
help = 'file with user options, in JSON format' )
parser.add_argument( '--stdout', type = str, default = None,
help = 'optional file to use for stdout' )
parser.add_argument( '--stderr', type = str, default = None,
help = 'optional file to use for stderr' )
parser.add_argument( '--keep_logfiles', action = 'store_true', default = None,
help = 'retain logfiles after the server exits' )
return parser.parse_args()
def SetupLogging( log_level ):
numeric_level = getattr( logging, log_level.upper(), None )
if not isinstance( numeric_level, int ):
raise ValueError( 'Invalid log level: %s' % log_level )
# Has to be called before any call to logging.getLogger()
logging.basicConfig( format = '%(asctime)s - %(levelname)s - %(message)s',
level = numeric_level )
def SetupOptions( options_file ):
options = user_options_store.DefaultOptions()
user_options = json.loads( ReadFile( options_file ) )
options.update( user_options )
utils.RemoveIfExists( options_file )
hmac_secret = ToBytes( base64.b64decode( options[ 'hmac_secret' ] ) )
del options[ 'hmac_secret' ]
user_options_store.SetAll( options )
return options, hmac_secret
def CloseStdin():
sys.stdin.close()
os.close( 0 )
def Main():
args = ParseArguments()
if args.stdout is not None:
sys.stdout = OpenForStdHandle( args.stdout )
if args.stderr is not None:
sys.stderr = OpenForStdHandle( args.stderr )
SetupLogging( args.log )
options, hmac_secret = SetupOptions( args.options_file )
# This ensures that ycm_core is not loaded before extra conf
# preload was run.
YcmCoreSanityCheck()
extra_conf_store.CallGlobalExtraConfYcmCorePreloadIfExists()
code = CompatibleWithCurrentCore()
if code:
sys.exit( code )
PossiblyDetachFromTerminal()
# This can't be a top-level import because it transitively imports
# ycm_core which we want to be imported ONLY after extra conf
# preload has executed.
from ycmd import handlers
handlers.UpdateUserOptions( options )
handlers.SetHmacSecret( hmac_secret )
SetUpSignalHandler( args.stdout, args.stderr, args.keep_logfiles )
handlers.app.install( WatchdogPlugin( args.idle_suicide_seconds ) )
handlers.app.install( HmacPlugin( hmac_secret ) )
CloseStdin()
waitress.serve( handlers.app,
host = args.host,
port = args.port,
threads = 30 )
if __name__ == "__main__":
Main()
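# Example invocation (hypothetical paths; normally a ycmd client such as
# YouCompleteMe writes the options file and starts the server itself):
#   python /path/to/ycmd --options_file /tmp/ycmd_options.json --port 0 --log debug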
|
unlicense
| 541,239,393,013,748,600
| 31.770492
| 80
| 0.677505
| false
| 3.805203
| false
| false
| false
|
ellisonbg/altair
|
altair/vegalite/tests/test_common.py
|
1
|
2712
|
"""Tests of functionality that should work in all vegalite versions"""
import pytest
import pandas as pd
from .. import v1, v2
v1_defaults = {
'width': 400,
'height': 300
}
v2_defaults = {
'config': {
'view': {
'height': 300,
'width': 400
}
}
}
basic_spec = {
'data': {'url': 'data.csv'},
'mark': 'line',
'encoding': {
'color': {'type': 'nominal', 'field': 'color'},
'x': {'type': 'quantitative', 'field': 'xval'},
'y': {'type': 'ordinal', 'field': 'yval'}
},
}
def make_basic_chart(alt):
data = pd.DataFrame({
'a': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'],
'b': [28, 55, 43, 91, 81, 53, 19, 87, 52]
})
return alt.Chart(data).mark_bar().encode(
x='a',
y='b'
)
spec_v1 = dict(v1_defaults, **basic_spec)
spec_v2 = dict(v2_defaults, **basic_spec)
@pytest.mark.parametrize('alt,basic_spec', [(v1, spec_v1), (v2, spec_v2)])
def test_basic_chart_to_dict(alt, basic_spec):
chart = alt.Chart('data.csv').mark_line().encode(
alt.X('xval:Q'),
y=alt.Y('yval:O'),
color='color:N'
)
dct = chart.to_dict()
# schema should be in the top level
assert dct.pop('$schema').startswith('http')
# remainder of spec should match the basic spec
assert dct == basic_spec
@pytest.mark.parametrize('alt,basic_spec', [(v1, spec_v1), (v2, spec_v2)])
def test_basic_chart_from_dict(alt, basic_spec):
chart = alt.Chart.from_dict(basic_spec)
dct = chart.to_dict()
# schema should be in the top level
assert dct.pop('$schema').startswith('http')
# remainder of spec should match the basic spec
assert dct == basic_spec
@pytest.mark.parametrize('alt', [v1, v2])
def test_theme_enable(alt):
active_theme = alt.themes.active
try:
alt.themes.enable('none')
chart = alt.Chart.from_dict(basic_spec)
dct = chart.to_dict()
# schema should be in the top level
assert dct.pop('$schema').startswith('http')
# remainder of spec should match the basic spec
# without any theme settings
assert dct == basic_spec
finally:
# reset the theme to its initial value
alt.themes.enable(active_theme)
@pytest.mark.parametrize('alt', [v1, v2])
def test_max_rows(alt):
basic_chart = make_basic_chart(alt)
with alt.data_transformers.enable('default'):
basic_chart.to_dict() # this should not fail
with alt.data_transformers.enable('default', max_rows=5):
print(alt.data_transformers.options)
with pytest.raises(alt.MaxRowsError):
            basic_chart.to_dict() # this should fail
|
bsd-3-clause
| -2,823,563,517,079,322,600
| 23.880734
| 74
| 0.580383
| false
| 3.21327
| true
| false
| false
|
maruqu/flask-jsonapi
|
flask_jsonapi/response.py
|
1
|
2412
|
import http
import json
from flask import helpers
class BaseResponse:
def __init__(self, headers=None, status=None):
self.status = status or http.HTTPStatus.OK
self.headers = headers or {}
def make_response(self):
response = helpers.make_response(
self.get_content(),
self.status,
)
response.headers.extend(self.headers)
return response
def get_content(self):
raise NotImplementedError
class EmptyResponse(BaseResponse):
def __init__(self, headers=None, status=http.HTTPStatus.NO_CONTENT):
super().__init__(headers, status)
def get_content(self):
return ''
class BaseJsonApiResponse(BaseResponse):
base_header = {'Content-Type': 'application/vnd.api+json'}
def make_response(self):
response = super().make_response()
response.headers.extend(self.base_header)
return response
def get_content(self):
data = dict(self.get_response_data(), **{'jsonapi': {'version': '1.0'}})
return json.dumps(data)
def get_response_data(self):
raise NotImplementedError
class JsonApiResponse(BaseJsonApiResponse):
def __init__(self, response_data, links=None, headers=None, status=None):
self.response_data = response_data
self.links = links or {}
super().__init__(headers, status)
def get_response_data(self):
return dict(
**self.response_data,
**self.get_links(),
)
def get_links(self):
if self.links:
links = {'links': self.links}
else:
links = {}
return links
class JsonApiListResponse(JsonApiResponse):
def get_response_data(self):
response_data = super().get_response_data()
return dict(**response_data, **{'meta': {'count': len(self.response_data['data'])}})
class JsonApiErrorResponse(BaseJsonApiResponse):
def __init__(self, *jsonapi_errors, headers=None, status=http.HTTPStatus.INTERNAL_SERVER_ERROR):
super().__init__(headers, status)
self.jsonapi_errors_tuple = jsonapi_errors
@classmethod
def from_marshmallow_errors(cls, errors, status=http.HTTPStatus.UNPROCESSABLE_ENTITY):
return cls(*errors['errors'], status=status)
def get_response_data(self):
return {
'errors': list(self.jsonapi_errors_tuple),
}
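# Illustrative usage in a Flask view (a sketch; the payload shape and the
# import path are assumptions based on this module's location):
#     from flask_jsonapi.response import JsonApiResponse
#     def get_article(article_id):
#         data = {'data': {'type': 'articles', 'id': str(article_id),
#                          'attributes': {'title': 'Example'}}}
#         links = {'self': '/articles/{}'.format(article_id)}
#         return JsonApiResponse(data, links=links).make_response()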
|
bsd-3-clause
| -8,647,365,526,588,198,000
| 27.046512
| 100
| 0.623964
| false
| 3.986777
| false
| false
| false
|
basilfx/Happening-eetlijst
|
server.py
|
1
|
3840
|
from flask import request, abort, jsonify, Flask
from werkzeug.contrib.cache import SimpleCache, RedisCache
from datetime import datetime
import pytz
import cPickle
import eetlijst
import calendar
import functools
# App definition
app = Flask(__name__)
app.debug = True
# Use simple cache for cli-mode. For WSGI mode use a shared cache.
if __name__ == "__main__":
cache = SimpleCache()
else:
cache = RedisCache("10.0.0.3", key_prefix="eetlijst")
def to_unix_timestamp(timestamp):
"""
    Convert a datetime object to a unix timestamp. Input is local time, the
    result is a UTC timestamp.
"""
if timestamp is not None:
return calendar.timegm(timestamp.utctimetuple())
def from_unix_timestamp(timestamp):
"""
Convert unix timestamp to datetime object. Input is a UTC timestamp, result
is local time.
"""
if timestamp is not None:
return datetime.fromtimestamp(int(timestamp), tz=pytz.UTC).astimezone(
eetlijst.TZ_LOCAL)
def inject_client(func):
"""
Inject the Eetlijst client from cache, if available. Otherwise, create a new
one.
"""
@functools.wraps(func)
def _inner():
username = request.args.get("username")
password = request.args.get("password")
if not username or not password:
return abort(400)
# Fetch eetlijst client from cache
key = username + "-" + password
client = cache.get(key)
if client:
try:
client = cPickle.loads(client)
except cPickle.UnpicklingError:
client = None
if not client:
app.logger.debug("Creating new client")
try:
client = eetlijst.Eetlijst(username=username, password=password,
login=True)
except eetlijst.LoginError:
return abort(401)
else:
app.logger.debug("Continuing existing client")
# Invoke original method
try:
result = func(client)
# Store in cache again
cache.set(key, cPickle.dumps(client,
protocol=cPickle.HIGHEST_PROTOCOL), timeout=60)
except:
app.logger.debug("Client state NOT updated due to exception")
raise
return result
return _inner
@app.route("/info", methods=["GET"])
@inject_client
def get_info(client):
return jsonify({
"result": {
"name": client.get_name(),
"residents": client.get_residents()
}
})
@app.route("/status", methods=["GET"])
@inject_client
def get_status(client):
status_rows = client.get_statuses(limit=1)
return jsonify({
"result": [{
"statuses": [{
"value": status.value,
"last_changed": to_unix_timestamp(status.last_changed)
} for status in status_row.statuses ],
"deadline": to_unix_timestamp(status_row.deadline),
"timestamp": to_unix_timestamp(status_row.timestamp)
} for status_row in status_rows ]
})
@app.route("/status", methods=["POST"])
@inject_client
def set_status(client):
timestamp = from_unix_timestamp(request.args["timestamp"])
resident = request.args["resident"]
value = request.args["value"]
client.set_status(resident, value, timestamp)
return jsonify({
"result": True
})
@app.route("/noticeboard", methods=["GET"])
@inject_client
def get_noticeboard(client):
return jsonify({
"result": client.get_noticeboard()
})
@app.route("/noticeboard", methods=["POST"])
@inject_client
def set_noticeboard(client):
client.set_noticeboard(request.args["content"])
return jsonify({
"result": True
})
# E.g. `python server.py`
if __name__ == '__main__':
app.run()
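# Example request (illustrative; hypothetical credentials, Flask's default port):
#   curl 'http://localhost:5000/status?username=alice&password=secret'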
|
mit
| 2,811,164,308,548,508,000
| 24.952703
| 80
| 0.605469
| false
| 3.995838
| false
| false
| false
|
joshbressers/cve-analysis
|
check-url.py
|
1
|
2597
|
#!/usr/bin/env python
import sys
from elasticsearch import Elasticsearch
import requests
from queue import Queue
import threading
# Setup some queues
global total_size
global url_q
global count_q
url_q = Queue()
url_q.maxsize = 1000
count_q = Queue()
q_threads = 8
class Reference:
"CVE References class"
def __init__(self, my_url, my_id):
self.url = my_url
self.id = my_id
self.status = 0
def get_id(self):
return self.id
def get_url(self):
return self.url
def get_status(self):
return self.status
def check_url(self):
"Get the return code for a URL"
try:
r = requests.head(self.url, timeout=10)
self.status = r.status_code
except requests.ConnectionError:
pass
except requests.exceptions.InvalidSchema:
pass
except requests.exceptions.ReadTimeout:
pass
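# Illustrative use of Reference outside the worker pool (hypothetical URL/id):
#     ref = Reference('https://example.com/advisory', 'CVE-2017-0001')
#     ref.check_url()
#     print(ref.get_status())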
def update_status(the_q):
"Pull data from the status_queue and update it"
while True:
the_data = the_q.get()
the_data.check_url()
the_path = the_data.get_url()
the_id = the_data.get_id()
status = the_data.get_status()
#es.update(index="cve-references", id=the_id, doc_type='ref',
# body={"doc": {"status_code": status}})
the_q.task_done()
count_q.put(1)
print("%d/%d" % (count_q.qsize(), total_size))
# Set up some threads
for i in range(q_threads):
worker = threading.Thread(
target=update_status,
args=(url_q,),
name='worker-{}'.format(i),
)
    worker.daemon = True
worker.start()
# Setup all the ES connections and run our first query
es = Elasticsearch(['http://elastic:changeme@localhost:9200'])
res = es.search(index="cve-index", scroll='5m',
size=10, body={"_source": ["references.reference_data.url"],"query": {"match_all": {}}})
sid = res['_scroll_id']
scroll_size = res['hits']['total']['value']
total_size = res['hits']['total']['value']
current = 0
while scroll_size > 0:
for hit in res['hits']['hits']:
# Not all CVE IDs have references
if 'references' in hit['_source']:
for url in hit['_source']['references']['reference_data']:
the_path = url['url']
the_id = hit['_id']
the_ref = Reference(the_path, the_id)
url_q.put(the_ref)
res = es.scroll(scroll_id = sid, scroll = '5m')
# Update the scroll ID
sid = res['_scroll_id']
    scroll_size = len(res['hits']['hits'])
# wait for the worker threads to drain the queue before the daemon threads
# are torn down at interpreter exit
url_q.join()
|
gpl-3.0
| -8,219,658,797,768,336,000
| 23.5
| 104
| 0.576434
| false
| 3.547814
| false
| false
| false
|
nickw444/quadcopter
|
single_prop/PIDTest.py
|
1
|
1390
|
#!/usr/bin/env python3
from BasicPID import BasicPID2, VideoPID
from RPIO import PWM
from MiniMu9 import Accelerometer
from MotorControl import MotorControl
import sys
import time
PWM.set_loglevel(PWM.LOG_LEVEL_ERRORS)
servo = PWM.Servo(pulse_incr_us=1)
PWM_MAX = 2000
PWM_MIN = 1000
PWM_RANGE = PWM_MAX - PWM_MIN
pid = VideoPID(setpoint=0, kP=2.5, kI=0.2, kD=0.5, zeros=False)
accel = Accelerometer()
controller = MotorControl(pins=[25])
controller.begin_calibration()
print("Press [RETURN] once you have connected power to the motors.")
input()
controller.continue_calibration()
print("Motors Calibrated. Beginning PID Loop")
motor_weight_offset = 27 # Percentage power at which the motor balances.
print("WAIT")
input() # pause until the user presses RETURN
while True:
# Get Reading
current = accel.readX()
# Calculate pid
output = pid.update(current)
# Output New Motor value
scaled_output = motor_weight_offset + output
scaled_output = round(scaled_output, 0)
# Put some output caps.
if scaled_output < 15: scaled_output = 15
if scaled_output > 50: scaled_output = 50
controller.set_motor(0, scaled_output)
# sys.stdout.write('\r')
# sys.stdout.write("Current Value: {}. Last PID Output: {}. Motor Output: {}".format(round(current, 2), round(output,2), scaled_output))
# sys.stdout.flush()
time.sleep(0.05)
print("KEK:")
print("DICKS")
|
gpl-3.0
| -8,715,218,448,289,010,000
| 22.559322
| 140
| 0.701439
| false
| 3.075221
| false
| false
| false
|
apache/incubator-datafu
|
datafu-spark/src/main/resources/pyspark_utils/bridge_utils.py
|
2
|
2961
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from py4j.java_gateway import JavaGateway, GatewayClient
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
# use jvm gateway to create a java class instance by full-qualified class name
def _getjvm_class(gateway, fullClassName):
return gateway.jvm.java.lang.Thread.currentThread().getContextClassLoader().loadClass(fullClassName).newInstance()
class Context(object):
def __init__(self):
        """When running a Python script from Scala - this method is called
        by the script to initialize the connection to the Java Gateway and get the spark context.
        The code is basically copied from:
        https://github.com/apache/zeppelin/blob/master/spark/interpreter/src/main/resources/python/zeppelin_pyspark.py#L30
        """
        from py4j.java_gateway import java_import
if os.environ.get("SPARK_EXECUTOR_URI"):
SparkContext.setSystemProperty("spark.executor.uri", os.environ["SPARK_EXECUTOR_URI"])
gateway = JavaGateway(GatewayClient(port=int(os.environ.get("PYSPARK_GATEWAY_PORT"))), auto_convert=True)
java_import(gateway.jvm, "org.apache.spark.SparkEnv")
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
intp = gateway.entry_point
jSparkSession = intp.pyGetSparkSession()
jsc = intp.pyGetJSparkContext(jSparkSession)
jconf = intp.pyGetSparkConf(jsc)
conf = SparkConf(_jvm = gateway.jvm, _jconf = jconf)
self.sc = SparkContext(jsc=jsc, gateway=gateway, conf=conf)
# Spark 2
self.sparkSession = SparkSession(self.sc, jSparkSession)
self.sqlContext = self.sparkSession._wrapped
ctx = None
def get_contexts():
global ctx
if not ctx:
ctx = Context()
return ctx.sc, ctx.sqlContext, ctx.sparkSession
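# Illustrative usage from a Python script launched by the Scala side (a sketch;
# assumes datafu-spark has already set the py4j gateway environment variables):
#     sc, sqlContext, spark = get_contexts()
#     spark.range(10).show()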
|
apache-2.0
| 3,148,224,414,666,694,000
| 40.125
| 122
| 0.716988
| false
| 3.796154
| false
| false
| false
|
arpan-chavda/rh_app
|
libs/venus/planet/shell/dj.py
|
1
|
1662
|
import os.path
import urlparse
import datetime
import tmpl
from planet import config
def DjangoPlanetDate(value):
return datetime.datetime(*value[:6])
# remap PlanetDate to be a datetime, so Django template authors can use
# the "date" filter on these values
tmpl.PlanetDate = DjangoPlanetDate
def run(script, doc, output_file=None, options={}):
"""process a Django template file"""
# this is needed to use the Django template system as standalone
# I need to re-import the settings at every call because I have to
# set the TEMPLATE_DIRS variable programmatically
from django.conf import settings
settings._wrapped=None
try:
settings.configure(
DEBUG=True, TEMPLATE_DEBUG=True,
TEMPLATE_DIRS=(os.path.dirname(script),)
)
except EnvironmentError:
pass
from django.template import Context
from django.template.loader import get_template
# set up the Django context by using the default htmltmpl
# datatype converters
context = Context(autoescape=(config.django_autoescape()=='on'))
context.update(tmpl.template_info(doc))
context['Config'] = config.planet_options()
t = get_template(script)
if output_file:
reluri = os.path.splitext(os.path.basename(output_file))[0]
context['url'] = urlparse.urljoin(config.link(),reluri)
f = open(output_file, 'w')
ss = t.render(context)
if isinstance(ss,unicode): ss=ss.encode('utf-8')
f.write(ss)
f.close()
else:
# @@this is useful for testing purposes, but does it
# belong here?
return t.render(context)
|
gpl-3.0
| 7,470,166,845,927,974,000
| 31.588235
| 72
| 0.669073
| false
| 4.053659
| true
| false
| false
|
joshrabinowitz/bitcoin
|
test/functional/test_framework/messages.py
|
1
|
39921
|
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message structures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization."""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
BIP125_SEQUENCE_NUMBER = 0xfffffffd # Sequence number that is BIP 125 opt-in and BIP 68-opt-out
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_UNSUPPORTED_SERVICE_BIT_5 = (1 << 5)
NODE_UNSUPPORTED_SERVICE_BIT_7 = (1 << 7)
NODE_NETWORK_LIMITED = (1 << 10)
MSG_TX = 1
MSG_BLOCK = 2
MSG_WITNESS_FLAG = 1 << 30
MSG_TYPE_MASK = 0xffffffff >> 2
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
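# Illustrative round-trips for the CompactSize encoding implemented above
# (a sketch; the byte values follow directly from the struct.pack calls):
#   ser_compact_size(100)     -> b'\x64'                   (single byte)
#   ser_compact_size(1000)    -> b'\xfd\xe8\x03'           (0xfd marker + uint16)
#   ser_compact_size(100000)  -> b'\xfe\xa0\x86\x01\x00'   (0xfe marker + uint32)
#   deser_compact_size(BytesIO(b'\xfd\xe8\x03')) -> 1000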
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
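# Illustrative only: ser_vector with the optional ser_function_name argument,
# as used further down when serializing the transaction vector of a witness
# block ("block" is a placeholder for a CBlock instance):
#   ser_vector(block.vtx)                             # calls serialize() on each entry
#   ser_vector(block.vtx, "serialize_with_witness")   # calls serialize_with_witness()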
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
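# Illustrative round-trip between RPC hex and the objects defined below
# ("hex_string" is a placeholder for a raw transaction hex from bitcoind):
#   tx = FromHex(CTransaction(), hex_string)   # parse hex into a CTransaction
#   ToHex(tx)                                  # back to hex, e.g. for sendrawtransaction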
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress():
def __init__(self):
self.time = 0
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f, with_time=True):
if with_time:
self.time = struct.unpack("<i", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self, with_time=True):
r = b""
if with_time:
r += struct.pack("<i", self.time)
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv():
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator():
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness():
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness():
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness():
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_with_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.hash
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader():
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction():
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
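# Illustrative only: how the shortid helpers fit together when checking a
# received compact block against a local transaction (the names
# header_and_shortids and tx are placeholders; get_siphash_keys is defined on
# HeaderAndShortIDs below):
#   k0, k1 = header_and_shortids.get_siphash_keys()
#   shortid = calculate_shortid(k0, k1, tx.calc_sha256(with_witness=True))
#   # compare shortid against the entries in header_and_shortids.shortids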
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest():
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree():
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
self.fBad = False
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock():
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version():
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK | NODE_WITNESS
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f, False)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f, False)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize(False)
r += self.addrFrom.serialize(False)
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack():
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr():
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv():
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx():
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block():
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr():
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping():
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders():
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers():
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter():
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
|
mit
| 287,877,331,666,698,980
| 28.246154
| 262
| 0.576188
| false
| 3.446516
| false
| false
| false
|
RoboticsClubatUCF/RoboSub
|
ucf_sub_catkin_ros/src/sub_vision/src/particle.py
|
1
|
1397
|
import math
import numpy as np
import random
def initParticles(particleNum, imageHeight, imageWidth):
particles = np.zeros((particleNum,3), dtype=float)
for i in range(particleNum):
particles[i][0] = random.randint(0,imageWidth)
particles[i][1] = random.randint(0,imageHeight)
particles[i][2] = 0
return particles
def add_gaussian(particles):
noiseX = np.random.normal(0, 1, len(particles))
noiseY = np.random.normal(0, 1, len(particles))
particles[...,0] = particles[...,0]+noiseX
particles[...,1] = particles[...,1]+noiseY
return particles
# Resample particles in proportion to their weights and return desiredParticleSize new particles
def resample_particles(curPart, desiredParticleSize):
weights = curPart[...,2]
newIndices = np.random.choice(len(curPart),desiredParticleSize, p=weights)
    newParticles = np.zeros((desiredParticleSize, 3), dtype=float)
    curIndex = 0
    for i in newIndices:
        # copy each chosen particle into the resampled set
        newParticles[curIndex] = curPart[i]
        curIndex += 1
return newParticles
def euclidean_distance(curPosit, virtualPosit):
return math.sqrt((virtualPosit[0]-curPosit[0])**2+(virtualPosit[1]-curPosit[1])**2)
def update(curPart, curPosit):
total = 0
for i in range(len(curPart)):
curPart[i][2] = euclidean_distance(curPosit,curPart[i])
total = total + curPart[i][2]
curPart[...,2] = np.divide(curPart[...,2],total)
return curPart
def findBestCommand(particles):
return particles[np.unravel_index(particles[...,2].argmax(), particles[...,2].shape)]
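# Illustrative usage of the particle filter helpers above (a sketch; the image
# size, particle count and measured position are made-up values):
#   particles = initParticles(100, imageHeight=480, imageWidth=640)
#   particles = add_gaussian(particles)
#   particles = update(particles, (320, 240))
#   particles = resample_particles(particles, 100)
#   best = findBestCommand(particles)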
|
mit
| -780,931,888,643,623,800
| 28.104167
| 86
| 0.718683
| false
| 2.959746
| false
| false
| false
|
mcjug2015/mfserver2
|
django_app/services/user_service.py
|
1
|
4947
|
''' user services module '''
# pylint: disable=no-member
import logging
import random
import string
import django.core.mail as django_mail
from django.contrib.auth.models import User
from django.utils import timezone
from django.http.response import HttpResponseForbidden
from django_app.models import UserConfirmation
LOGGER = logging.getLogger(__name__)
def get_user_to_register(email):
'''
if no matching user exists will return an unsaved user obj
if matching user exists but is inactive, will return that db object
Otherwise None will be returned since registration should not go through
'''
retval = None
user_query = User.objects.filter(username=email)
if user_query.count() == 0:
next_pk = User.objects.latest('pk').pk + 1
user = User(pk=next_pk, username=email, email=email, first_name='NOT_SET', last_name='NOT_SET',
is_active=False, is_superuser=False, is_staff=False)
retval = user
else:
user = user_query[0:1][0]
if not user.is_active:
retval = user
return retval
def create_user_and_conf(email, password):
''' create an inactive user and a conf to activate him with '''
retval = {"user": None,
"conf": None,
"status": "Active user with email %s already exists" % email}
user = get_user_to_register(email)
if not user:
return retval
retval["user"] = user
retval["status"] = "confirmation emailed to %s, click the link to complete registration" % email
user.set_password(password)
user.save()
user_confirmation = create_conf(user=user, conf_type="registration")
user_confirmation.save()
retval["conf"] = user_confirmation
return retval
def complete_user_registration(conf_str):
''' set user to active if the conf str is good '''
retval = {"status": "Confirmation ivalid, used or expired, unable to complete user registration",
"code": 400}
conf_query = UserConfirmation.objects.filter(confirmation_key=conf_str,
conf_type="registration",
is_confirmed=False)
if conf_query.count() == 0:
return retval
conf = conf_query[0:1][0]
conf.is_confirmed = True
conf.confirmation_date = timezone.now()
conf.save()
conf.user.is_active = True
conf.user.save()
retval["status"] = "Successfully completed registration for %s" % conf.user.username
retval["code"] = 200
return retval
def request_password_reset(username):
''' generate a conf if user is eligible to reset password '''
retval = {"conf": None, "user": None, "status": "invalid username %s" % username}
user = User.objects.filter(username=username)
if user.count() == 0:
return retval
user = user[0:1][0]
if not user.is_active:
retval["status"] = "Inactive user %s ineligible to reset password" % username
return retval
user_confirmation = create_conf(user=user, conf_type="password_reset")
user_confirmation.save()
retval["conf"] = user_confirmation
retval["user"] = user
retval["status"] = "successful password reset request for %s" % username
return retval
def reset_password(conf, password):
''' reset password or error out '''
if conf.is_confirmed:
return "The password reset link you clicked has already been used and can not be used again."
conf.user.set_password(password)
conf.user.save()
conf.is_confirmed = True
conf.confirmation_date = timezone.now()
conf.save()
return "Successfully changed password for user %s" % conf.user.username
def create_conf(user, conf_type):
''' create a user confirmation '''
user_confirmation = UserConfirmation(user=user, conf_type=conf_type)
user_confirmation.confirmation_key = ''.join([random.choice(string.digits + string.ascii_letters)
for i in range(0, 64)]) # pylint: disable=unused-variable
return user_confirmation
def get_conf_and_response(conf_str, conf_type):
'''
    get a (confirmation, response) pair: if no confirmation exists for the provided
    input the result is (None, HttpResponseForbidden()), otherwise (confirmation, None)
'''
reset_conf = UserConfirmation.objects.filter(confirmation_key=conf_str,
conf_type=conf_type)
if reset_conf.count() == 0:
return None, HttpResponseForbidden()
return reset_conf[0:1][0], None
def send_email_to_user(user, subject_text, message_text):
''' send email to user with supplied subject and body '''
LOGGER.debug("About to send conf email with message %s", message_text)
django_mail.send_mail(subject=subject_text, message=message_text,
from_email="meetingfinder@noreply.com",
recipient_list=[user.email], fail_silently=False)
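# Illustrative end-to-end registration flow using the helpers above (a sketch;
# the email address, password and subject/body text are placeholders):
#   result = create_user_and_conf('new.user@example.com', 's3cret')
#   if result['conf'] is not None:
#       send_email_to_user(result['user'], 'Confirm your account',
#                          'Your key: %s' % result['conf'].confirmation_key)
#   # later, when the user follows the emailed link:
#   complete_user_registration(result['conf'].confirmation_key)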
|
gpl-3.0
| 8,109,688,632,868,514,000
| 37.952756
| 108
| 0.642612
| false
| 4.074959
| false
| false
| false
|
deep-compute/django-yada
|
setup.py
|
1
|
1803
|
from setuptools import setup, find_packages
import os
HERE = os.path.abspath(os.path.dirname(__file__))
def get_long_description():
dirs = [ HERE ]
if os.getenv("TRAVIS"):
dirs.append(os.getenv("TRAVIS_BUILD_DIR"))
long_description = ""
for d in dirs:
rst_readme = os.path.join(d, "README.rst")
if not os.path.exists(rst_readme):
continue
with open(rst_readme) as fp:
long_description = fp.read()
return long_description
return long_description
long_description = get_long_description()
# https://docs.djangoproject.com/en/1.11/intro/reusable-apps/
version = '0.0.1'
setup(
name='django-yada',
version=version,
packages=find_packages(),
include_package_data=True,
license='MIT License', # example license
description='Yet another django api library (with hmac auth)',
long_description=long_description,
url='https://github.com/deep-compute/django-yada',
download_url="https://github.com/deep-compute/django-yada/tarball/%s" % version,
author='Deep Compute, LLC',
author_email='contact@deepcompute.com',
install_requires=[
# TODO change this once our middleware supports
# the latest django version
"django==1.9"
],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.9',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
mit
| 5,523,698,637,969,473,000
| 30.631579
| 84
| 0.621187
| false
| 3.911063
| false
| false
| false
|
kizbitz/train
|
train/vpc/config.py
|
2
|
3380
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import amis
def check_env(env, default=None):
"""Check/Set environment variables"""
if not os.environ.get(env) and not default:
print "Error: '{0}' environment variable not set".format(env)
sys.exit()
return os.environ.get(env, default)
def check_user_file(VPC, user_file):
"""Check/create USER_FILE"""
if user_file:
return user_file
elif os.path.exists('/host/{0}/users.cfg'.format(VPC)):
return '/host/{0}/users.cfg'.format(VPC)
else:
if not os.path.exists('/host/{0}'.format(VPC)):
os.makedirs('/host/{0}'.format(VPC))
with open('/host/{0}/users.cfg'.format(VPC), 'w') as f:
f.write(TRAINER + '\n')
return '/host/{0}/users.cfg'.format(VPC)
def get_email_template(VPC, template):
"""Check EMAIL_TEMPLATE"""
if template:
return template
elif os.path.exists('/host/{0}/email.py'.format(VPC)):
return '/host/{0}/email.py'.format(VPC)
else:
return '/home/train/train/templates/email.py'
def check_ses_region(env):
"""Check/Set SES_REGION environment variable"""
# Available SES Regions: http://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region
SES_REGIONS = ['us-east-1', 'us-west-2', 'eu-west-1']
if not os.environ.get(env):
print "Error: '{0}' environment variable not set".format(env)
sys.exit()
else:
if not os.environ.get(env) in SES_REGIONS:
print "Error: The '{0}' region specified is not one of the available SES regions".format(os.environ.get(env))
print " See: http://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region"
sys.exit()
else:
return os.environ.get(env)
# Required environment variables
# ==============================
# Trainer name. Used to tag VPC, Security Groups, etc...
TRAINER = check_env('TRAINER')
# AWS region, id, and key
AWS_REGION = check_env('AWS_REGION')
AWS_ACCESS_KEY_ID = check_env('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = check_env('AWS_SECRET_ACCESS_KEY')
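# Illustrative only: the environment this module expects to find at import time
# (the values and the launch command are placeholders):
#   TRAINER=alice AWS_REGION=us-east-1 \
#   AWS_ACCESS_KEY_ID=AKIA... AWS_SECRET_ACCESS_KEY=... <launch command>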
# Optional environment variables
# ==============================
# Tag for VPC, labs, instances, etc...
VPC = check_env('VPC', 'train')
# Root lab directory
LAB_DIR = check_env('LAB_DIR', '/home/train/train/labs/')
# Full path to user configuration file
USER_FILE = check_user_file(VPC, os.environ.get('USER_FILE'))
# Email Template
EMAIL_TEMPLATE = get_email_template(VPC, os.environ.get('EMAIL_TEMPLATE'))
# Note: Checked in ses.py
# SES_REGION
# SES_FROM_EMAIL
# SES_FROM_NAME
# Other
# =====
# AWS AMI dictionary
AMIS = getattr(amis, AWS_REGION.upper().replace('-', '_'))
# AWS IAM Profile
IAM_PROFILE = TRAINER + '-{0}'.format(VPC)
# AWS Gateway
IGW = TRAINER + '-{0}-igw'.format(VPC)
# IAM Policy
POLICY = """{
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeAvailabilityZones",
"ec2:DescribeTags"
],
"Resource": [
"*"
]
}
]
}"""
# AWS Network ACL
NETWORK_ACL = TRAINER + '-{0}-network-acl'.format(VPC)
# AWS Route Table
ROUTE_TABLE = TRAINER + '-{0}-route-table'.format(VPC)
# AWS VPC CIDR
VPC_CIDR = "10.0.0.0/16"
# AWS VPC Tag
VPC_TAG = TRAINER + '-{0}'.format(VPC)
# AWS Zones
ZONES=['a', 'b', 'c', 'd', 'e', 'f']
|
apache-2.0
| -8,407,919,099,661,274,000
| 23.671533
| 121
| 0.602959
| false
| 3.067151
| false
| false
| false
|
pombredanne/git-git.fedorahosted.org-git-pyrpm
|
tests/coverage.py
|
1
|
21827
|
#!/usr/bin/python
#
# Perforce Defect Tracking Integration Project
# <http://www.ravenbrook.com/project/p4dti/>
#
# COVERAGE.PY -- COVERAGE TESTING
#
# Gareth Rees, Ravenbrook Limited, 2001-12-04
#
#
# 1. INTRODUCTION
#
# This module provides coverage testing for Python code.
#
# The intended readership is all Python developers.
#
# This document is not confidential.
#
# See [GDR 2001-12-04a] for the command-line interface, programmatic
# interface and limitations. See [GDR 2001-12-04b] for requirements and
# design.
"""Usage:
coverage.py -x MODULE.py [ARG1 ARG2 ...]
Execute module, passing the given command-line arguments, collecting
coverage data.
coverage.py -e
Erase collected coverage data.
coverage.py -r [-m] FILE1 FILE2 ...
Report on the statement coverage for the given files. With the -m
option, show line numbers of the statements that weren't executed.
coverage.py -a [-d dir] FILE1 FILE2 ...
Make annotated copies of the given files, marking statements that
are executed with > and statements that are missed with !. With
the -d option, make the copies in that directory. Without the -d
option, make each copy in the same directory as the original.
Coverage data is saved in the file .coverage by default. Set the
COVERAGE_FILE environment variable to save it somewhere else."""
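# Illustrative programmatic use of the module-level functions defined near the
# bottom of this file (a sketch; "mymodule" is a placeholder):
#   import coverage, mymodule
#   coverage.start()
#   mymodule.main()
#   coverage.stop()
#   coverage.report(mymodule)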
import os
import re
import string
import sys
import types
# 2. IMPLEMENTATION
#
# This uses the "singleton" pattern.
#
# The word "morf" means a module object (from which the source file can
# be deduced by suitable manipulation of the __file__ attribute) or a
# filename.
#
# When we generate a coverage report we have to canonicalize every
# filename in the coverage dictionary just in case it refers to the
# module we are reporting on. It seems a shame to throw away this
# information so the data in the coverage dictionary is transferred to
# the 'cexecuted' dictionary under the canonical filenames.
#
# The coverage dictionary is called "c" and the trace function "t". The
# reason for these short names is that Python looks up variables by name
# at runtime and so execution time depends on the length of variables!
# In the bottleneck of this application it's appropriate to abbreviate
# names to increase speed.
# A dictionary with an entry for (Python source file name, line number
# in that file) if that line has been executed.
c = {}
# t(f, x, y). This method is passed to sys.settrace as a trace
# function. See [van Rossum 2001-07-20b, 9.2] for an explanation of
# sys.settrace and the arguments and return value of the trace function.
# See [van Rossum 2001-07-20a, 3.2] for a description of frame and code
# objects.
import os.path
def do_realpath(path):
if os.path.islink(path):
path = os.readlink(path)
head, tail = os.path.split(path)
if not tail:
return path
head = do_realpath(head)
return os.path.join(head, tail)
def realpath(path):
return do_realpath(os.path.abspath(path))
def t(f, x, y):
c[(f.f_code.co_filename, f.f_lineno)] = 1
return t
the_coverage = None
class coverage:
error = "coverage error"
# Name of the cache file (unless environment variable is set).
cache_default = ".coverage"
# Environment variable naming the cache file.
cache_env = "COVERAGE_FILE"
# A map from canonical Python source file name to a dictionary in
# which there's an entry for each line number that has been
# executed.
cexecuted = {}
# Cache of results of calling the analysis() method, so that you can
# specify both -r and -a without doing double work.
analysis_cache = {}
# Cache of results of calling the canonical_filename() method, to
# avoid duplicating work.
canonical_filename_cache = {}
def __init__(self):
global the_coverage
if the_coverage:
raise self.error, "Only one coverage object allowed."
self.cache = os.environ.get(self.cache_env, self.cache_default)
self.restore()
self.analysis_cache = {}
def help(self, error=None):
if error:
print error
print
print __doc__
sys.exit(1)
def command_line(self):
import getopt
settings = {}
optmap = {
'-a': 'annotate',
'-d:': 'directory=',
'-e': 'erase',
'-h': 'help',
'-i': 'ignore-errors',
'-m': 'show-missing',
'-r': 'report',
'-x': 'execute',
}
short_opts = string.join(map(lambda o: o[1:], optmap.keys()), '')
long_opts = optmap.values()
options, args = getopt.getopt(sys.argv[1:], short_opts,
long_opts)
for o, a in options:
if optmap.has_key(o):
settings[optmap[o]] = 1
elif optmap.has_key(o + ':'):
settings[optmap[o + ':']] = a
elif o[2:] in long_opts:
settings[o[2:]] = 1
elif o[2:] + '=' in long_opts:
settings[o[2:]] = a
else:
self.help("Unknown option: '%s'." % o)
if settings.get('help'):
self.help()
for i in ['erase', 'execute']:
for j in ['annotate', 'report']:
if settings.get(i) and settings.get(j):
self.help("You can't specify the '%s' and '%s' "
"options at the same time." % (i, j))
args_needed = (settings.get('execute')
or settings.get('annotate')
or settings.get('report'))
action = settings.get('erase') or args_needed
if not action:
self.help("You must specify at least one of -e, -x, -r, "
"or -a.")
if not args_needed and args:
self.help("Unexpected arguments %s." % args)
if settings.get('erase'):
self.erase()
if settings.get('execute'):
if not args:
self.help("Nothing to do.")
sys.argv = args
self.start()
import __main__
sys.path[0] = os.path.dirname(sys.argv[0])
execfile(sys.argv[0], __main__.__dict__)
if not args:
args = self.cexecuted.keys()
ignore_errors = settings.get('ignore-errors')
show_missing = settings.get('show-missing')
directory = settings.get('directory=')
if settings.get('report'):
self.report(args, show_missing, ignore_errors)
if settings.get('annotate'):
self.annotate(args, directory, ignore_errors)
def start(self):
sys.settrace(t)
def stop(self):
sys.settrace(None)
def erase(self):
global c
c = {}
self.analysis_cache = {}
self.cexecuted = {}
if os.path.exists(self.cache):
os.remove(self.cache)
# save(). Save coverage data to the coverage cache.
def save(self):
self.canonicalize_filenames()
cache = open(self.cache, 'wb')
import marshal
marshal.dump(self.cexecuted, cache)
cache.close()
# restore(). Restore coverage data from the coverage cache (if it
# exists).
def restore(self):
global c
c = {}
self.cexecuted = {}
if not os.path.exists(self.cache):
return
try:
cache = open(self.cache, 'rb')
import marshal
cexecuted = marshal.load(cache)
cache.close()
if isinstance(cexecuted, types.DictType):
self.cexecuted = cexecuted
except (IOError, EOFError, ValueError, TypeError):
pass
# canonical_filename(filename). Return a canonical filename for the
# file (that is, an absolute path with no redundant components and
# normalized case). See [GDR 2001-12-04b, 3.3].
def canonical_filename(self, filename):
if not self.canonical_filename_cache.has_key(filename):
f = filename
if os.path.isabs(f) and not os.path.exists(f):
f = os.path.basename(f)
if not os.path.isabs(f):
for path in [os.curdir] + sys.path:
g = os.path.join(path, f)
if os.path.exists(g):
f = g
break
cf = realpath(os.path.normcase(os.path.abspath(f)))
self.canonical_filename_cache[filename] = cf
return self.canonical_filename_cache[filename]
# canonicalize_filenames(). Copy results from "executed" to
# "cexecuted", canonicalizing filenames on the way. Clear the
# "executed" map.
def canonicalize_filenames(self):
global c
for filename, lineno in c.keys():
f = self.canonical_filename(filename)
if not self.cexecuted.has_key(f):
self.cexecuted[f] = {}
self.cexecuted[f][lineno] = 1
c = {}
# morf_filename(morf). Return the filename for a module or file.
def morf_filename(self, morf):
if isinstance(morf, types.ModuleType):
if not hasattr(morf, '__file__'):
raise self.error, "Module has no __file__ attribute."
file = morf.__file__
else:
file = morf
return self.canonical_filename(file)
# analyze_morf(morf). Analyze the module or filename passed as
# the argument. If the source code can't be found, raise an error.
# Otherwise, return a pair of (1) the canonical filename of the
# source code for the module, and (2) a list of lines of statements
# in the source code.
def analyze_morf(self, morf):
if self.analysis_cache.has_key(morf):
return self.analysis_cache[morf]
filename = self.morf_filename(morf)
ext = os.path.splitext(filename)[1]
if ext == '.pyc':
if not os.path.exists(filename[0:-1]):
raise self.error, ("No source for compiled code '%s'."
% filename)
filename = filename[0:-1]
elif ext != '.py':
raise self.error, "File '%s' not Python source." % filename
source = open(filename, 'r')
import parser
tree = parser.suite(source.read()).totuple(1)
source.close()
statements = {}
self.find_statements(tree, statements)
lines = statements.keys()
lines.sort()
result = filename, lines
self.analysis_cache[morf] = result
return result
# find_statements(tree, dict). Find each statement in the parse
# tree and record the line on which the statement starts in the
# dictionary (by assigning it to 1).
#
# It works by walking the whole tree depth-first. Every time it
# comes across a statement (symbol.stmt -- this includes compound
# statements like 'if' and 'while') it calls find_statement, which
# descends the tree below the statement to find the first terminal
# token in that statement and record the lines on which that token
# was found.
#
# This algorithm may find some lines several times (because of the
# grammar production statement -> compound statement -> statement),
# but that doesn't matter because we record lines as the keys of the
# dictionary.
#
# See also [GDR 2001-12-04b, 3.2].
def find_statements(self, tree, dict):
import symbol, token
if token.ISNONTERMINAL(tree[0]):
for t in tree[1:]:
self.find_statements(t, dict)
if tree[0] == symbol.stmt:
self.find_statement(tree[1], dict)
elif (tree[0] == token.NAME
and tree[1] in ['elif', 'except', 'finally']):
dict[tree[2]] = 1
def find_statement(self, tree, dict):
import token
while token.ISNONTERMINAL(tree[0]):
tree = tree[1]
dict[tree[2]] = 1
# format_lines(statements, lines). Format a list of line numbers
# for printing by coalescing groups of lines as long as the lines
# represent consecutive statements. This will coalesce even if
# there are gaps between statements, so if statements =
# [1,2,3,4,5,10,11,12,13,14] and lines = [1,2,5,10,11,13,14] then
# format_lines will return "1-2, 5-11, 13-14".
def format_lines(self, statements, lines):
pairs = []
i = 0
j = 0
start = None
pairs = []
while i < len(statements) and j < len(lines):
if statements[i] == lines[j]:
if start == None:
start = lines[j]
end = lines[j]
j = j + 1
elif start:
pairs.append((start, end))
start = None
i = i + 1
if start:
pairs.append((start, end))
def stringify(pair):
start, end = pair
if start == end:
return "%d" % start
else:
return "%d-%d" % (start, end)
import string
return string.join(map(stringify, pairs), ", ")
def analysis(self, morf):
filename, statements = self.analyze_morf(morf)
self.canonicalize_filenames()
if not self.cexecuted.has_key(filename):
self.cexecuted[filename] = {}
missing = []
for line in statements:
if not self.cexecuted[filename].has_key(line):
missing.append(line)
return (filename, statements, missing,
self.format_lines(statements, missing))
def morf_name(self, morf):
if isinstance(morf, types.ModuleType):
return morf.__name__
else:
if not morf:
return ""
return os.path.splitext(os.path.basename(morf))[0]
def report(self, morfs, show_missing=1, ignore_errors=0):
if not isinstance(morfs, types.ListType):
morfs = [morfs]
max_name = max([5,] + map(len, map(self.morf_name, morfs)))
fmt_name = "%%- %ds " % max_name
fmt_err = fmt_name + "%s: %s"
header = fmt_name % "Name" + " Stmts Exec Cover"
fmt_coverage = fmt_name + "% 6d % 6d % 5d%%"
if show_missing:
header = header + " Missing"
fmt_coverage = fmt_coverage + " %s"
print header
print "-" * len(header)
total_statements = 0
total_executed = 0
for morf in morfs:
name = self.morf_name(morf)
try:
_, statements, missing, readable = self.analysis(morf)
n = len(statements)
m = n - len(missing)
if n > 0:
pc = 100.0 * m / n
else:
pc = 100.0
args = (name, n, m, pc)
if show_missing:
args = args + (readable,)
print fmt_coverage % args
total_statements = total_statements + n
total_executed = total_executed + m
except KeyboardInterrupt:
raise
except:
if not ignore_errors:
type, msg = sys.exc_info()[0:2]
print fmt_err % (name, type, msg)
if len(morfs) > 1:
print "-" * len(header)
if total_statements > 0:
pc = 100.0 * total_executed / total_statements
else:
pc = 100.0
args = ("TOTAL", total_statements, total_executed, pc)
if show_missing:
args = args + ("",)
print fmt_coverage % args
    # annotate(morfs, directory, ignore_errors).
blank_re = re.compile("\\s*(#|$)")
else_re = re.compile("\\s*else\\s*:\\s*(#|$)")
def annotate(self, morfs, directory=None, ignore_errors=0):
for morf in morfs:
try:
filename, statements, missing, _ = self.analysis(morf)
source = open(filename, 'r')
if directory:
dest_file = os.path.join(directory,
os.path.basename(filename)
+ ',cover')
else:
dest_file = filename + ',cover'
dest = open(dest_file, 'w')
lineno = 0
i = 0
j = 0
covered = 1
while 1:
line = source.readline()
if line == '':
break
lineno = lineno + 1
while i < len(statements) and statements[i] < lineno:
i = i + 1
while j < len(missing) and missing[j] < lineno:
j = j + 1
if i < len(statements) and statements[i] == lineno:
covered = j >= len(missing) or missing[j] > lineno
if self.blank_re.match(line):
dest.write(' ')
elif self.else_re.match(line):
# Special logic for lines containing only
# 'else:'. See [GDR 2001-12-04b, 3.2].
if i >= len(statements) and j >= len(missing):
dest.write('! ')
elif i >= len(statements) or j >= len(missing):
dest.write('> ')
elif statements[i] == missing[j]:
dest.write('! ')
else:
dest.write('> ')
elif covered:
dest.write('> ')
else:
dest.write('! ')
dest.write(line)
source.close()
dest.close()
except KeyboardInterrupt:
raise
except:
if not ignore_errors:
raise
# Singleton object.
the_coverage = coverage()
# Module functions call methods in the singleton object.
def start(*args): return apply(the_coverage.start, args)
def stop(*args): return apply(the_coverage.stop, args)
def erase(*args): return apply(the_coverage.erase, args)
def analysis(*args): return apply(the_coverage.analysis, args)
def report(*args): return apply(the_coverage.report, args)
# Save coverage data when Python exits. (The atexit module wasn't
# introduced until Python 2.0, so use sys.exitfunc when it's not
# available.)
try:
import atexit
atexit.register(the_coverage.save)
except ImportError:
sys.exitfunc = the_coverage.save
# Command-line interface.
if __name__ == '__main__':
the_coverage.command_line()
# A. REFERENCES
#
# [GDR 2001-12-04a] "Statement coverage for Python"; Gareth Rees;
# Ravenbrook Limited; 2001-12-04;
# <http://www.garethrees.org/2001/12/04/python-coverage/>.
#
# [GDR 2001-12-04b] "Statement coverage for Python: design and
# analysis"; Gareth Rees; Ravenbrook Limited; 2001-12-04;
# <http://www.garethrees.org/2001/12/04/python-coverage/design.html>.
#
# [van Rossum 2001-07-20a] "Python Reference Manual (release 2.1.1)";
# Guido van Rossum; 2001-07-20;
# <http://www.python.org/doc/2.1.1/ref/ref.html>.
#
# [van Rossum 2001-07-20b] "Python Library Reference"; Guido van Rossum;
# 2001-07-20; <http://www.python.org/doc/2.1.1/lib/lib.html>.
#
#
# B. DOCUMENT HISTORY
#
# 2001-12-04 GDR Created.
#
# 2001-12-06 GDR Added command-line interface and source code
# annotation.
#
# 2001-12-09 GDR Moved design and interface to separate documents.
#
# 2001-12-10 GDR Open cache file as binary on Windows. Allow
# simultaneous -e and -x, or -a and -r.
#
# 2001-12-12 GDR Added command-line help. Cache analysis so that it
# only needs to be done once when you specify -a and -r.
#
# 2001-12-13 GDR Improved speed while recording. Portable between
# Python 1.5.2 and 2.1.1.
#
# 2002-01-03 GDR Module-level functions work correctly.
#
# 2002-01-07 GDR Update sys.path when running a file with the -x option,
# so that it matches the value the program would get if it were run on
# its own.
#
#
# C. COPYRIGHT AND LICENCE
#
# Copyright 2001 Gareth Rees. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
#
#
# $Id$
|
gpl-2.0
| -8,898,638,311,535,234,000
| 34.376013
| 74
| 0.570028
| false
| 3.965661
| false
| false
| false
|
peterheim1/robbie
|
bin/robbie_brain.py
|
1
|
4654
|
#!/usr/bin/env python
# coding: utf-8
#
# Software License Agreement (GPLv2 License)
#
# Copyright (c) 2011 Thecorpora, S.L.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import rospy
from std_msgs.msg import String
import smach
import smach_ros
import signal
import subprocess
import time
from festival.srv import *
def run_process(command = ""):
if command != "":
return subprocess.Popen(command.split())
else:
return -1
def run_all_process(all_commands):
proc=[]
for command in all_commands:
proc.append(subprocess.Popen(command.split()))
return proc
def kill_all_process(processes):
for process in processes:
process.send_signal(signal.SIGINT)
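# The execute() method further below calls runCmdOutput(), which is neither
# defined nor imported in this file. A minimal stand-in, assuming it is meant
# to return a command's stdout as a string (hypothetical helper):
def runCmdOutput(command):
    proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
    return proc.communicate()[0]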
def speak_this(text):
global speak_text_service
#speak_text_service(str(text))
speak_text_service(text)
####################
class CommonRobbieState(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["exit"])
self.state="none"
self.input_values={"STOP STATE MACHINE":"exit"}
self.next_state=""
self.launchers=[]
self.subscribe = None
def speech_callback(self, data):
sentence = data.data
#rospy.loginfo("Listened: |"+sentence+"|")
speak_text_service(sentence)
if self.state=="Default" and sentence == "HALT YOU ARE MOVE":
if robot_model.random_move:
run_process("rosnode kill /qbo_random_move")
robot_model.random_move = False
rospy.set_param("/qbo_face_following/move_base", False)
rospy.follow_face = False
speak_this("OK. I STOPPED MOVING")
return
try:
            # std_msgs/String carries the recognised text in .data (extracted above as 'sentence')
            self.next_state = self.input_values[sentence]
except:
rospy.loginfo("Sentence not found")
###########################
#Define default state
class default(CommonRobbieState):
def __init__(self):
smach.State.__init__(self, outcomes=['mplayer','phone','questions','webi','battery',''])
self.state="Default"
self.input_values={"RUN MUSIC PLAYER":"mplayer", "RUN PHONE SERVICES":"phone","RUN WEB INTERFACE":"webi","LAUNCH CHAT MODE":"questions"}
self.launchers=["roslaunch qbo_brain default_state.launch"]
self.launchers=[]
def execute(self, userdata):
rospy.loginfo('Executing: State '+self.state)
self.next_state=""
pids=run_all_process(self.launchers)
#Check if robbie_listen is down
rosnode_list = runCmdOutput("rosnode list")
if rosnode_list.find("/robbie_listen") == -1:
run_process("rosnode kill /robbie_listen")
time.sleep(2)
run_process("roslaunch robbie a_voice_rec.launch")
#Subscribe to topics
#Listeners
        self.subscribe = rospy.Subscriber("/speech_text", String, self.speech_callback)
speak_this("DEFAULT MODE IS ACTIVE")
while self.next_state=="" and not rospy.is_shutdown():
time.sleep(0.2)
rospy.loginfo("Waiting sentence")
if not rospy.is_shutdown():
speak_this("EXITING DEFAULT MODE")
self.subscribe.unregister()
rospy.loginfo("NextState: "+self.next_state)
active_check_face_object = False
kill_all_process(pids)
return self.next_state
def main():
rospy.init_node("phoenix_brain")
rospy.loginfo("Starting Phoenix Brain")
    global speak_text_service  # speak_this() above relies on this module-level proxy
    rospy.wait_for_service('speak_text')
    try:
        speak_text_service = rospy.ServiceProxy('speak_text', FestivalSpeech)
    except rospy.ServiceException, e:
        print "Failed to acquire Festival SpeakText service: %s" % e
rospy.spin()
if __name__ == '__main__':
main()
|
gpl-3.0
| -1,518,513,515,117,545,500
| 27.906832
| 144
| 0.609368
| false
| 3.881568
| false
| false
| false
|
tuturto/pyherc
|
src/pyherc/test/builders/action.py
|
1
|
12422
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for action factory builders
"""
from pyherc.rules.consume.factories import DrinkFactory
from pyherc.rules.digging.factories import DigFactory
from pyherc.rules.inventory.equip import EquipFactory
from pyherc.rules.inventory.factories import (DropFactory, InventoryFactory,
PickUpFactory)
from pyherc.rules.inventory.unequip import UnEquipFactory
from pyherc.rules.magic import GainDomainFactory, SpellCastingFactory
from pyherc.rules.mitosis.factory import MitosisFactory
from pyherc.rules.metamorphosis.factory import MetamorphosisFactory
from pyherc.rules.trapping.factory import TrappingFactory
from pyherc.rules.public import ActionFactory
from random import Random
class ActionFactoryBuilder():
"""
Class for building action factories
"""
def __init__(self):
"""
Default constructor
"""
super().__init__()
self.model = None
self.factories = []
self.effect_factory = None
self.use_real_drink_factory = False
self.use_real_inventory_factory = False
self.use_real_spellcasting_factory = False
self.use_real_gain_domain_factory = False
self.use_real_mitosis_factory = False
self.use_real_metamorphosis_factory = False
self.use_real_dig_factory = False
self.use_real_trapping_factory = False
def with_drink_factory(self, drink_factory=None):
"""
Configure action factory to use real drink factory
"""
if drink_factory is None:
self.use_real_drink_factory = True
else:
if hasattr(drink_factory, 'build'):
self.factories.append(drink_factory.build())
else:
self.factories.append(drink_factory)
return self
def with_spellcasting_factory(self, spellcasting_factory=None):
"""
Configure action factory to use real magic factory
.. versionadded:: 0.9
"""
if not spellcasting_factory:
self.use_real_spellcasting_factory = True
else:
if hasattr(spellcasting_factory, 'build'):
self.factories.append(spellcasting_factory.build())
else:
self.factories.append(spellcasting_factory)
return self
def with_inventory_factory(self):
"""
Configure action factory to use real inventory factory
"""
self.use_real_inventory_factory = True
return self
def with_effect_factory(self, effect_factory):
"""
Configure action factory to use effect factory
:param effect_factory: effect factory to use
:type effect_factory: EffectFactory
"""
self.effect_factory = effect_factory
return self
def with_gain_domain_factory(self, gain_domain_factory=None):
"""
Configure action factory to use gain domain factory
:param gain_domain_factory: gain domain factory to use
:type gain_domain_factory: GainDomainFactory
.. versionadded:: 0.10
"""
if gain_domain_factory:
self.factories.append(gain_domain_factory)
else:
self.use_real_gain_domain_factory = True
return self
def with_mitosis_factory(self, mitosis_factory=None):
"""
Configure action factory to use mitosis factory
"""
if mitosis_factory:
self.factories.append(mitosis_factory)
else:
self.use_real_mitosis_factory = True
return self
def with_metamorphosis_factory(self, metamorphosis_factory=None):
"""
Configure metamorphosis factory to use
"""
if metamorphosis_factory:
self.factories.append(metamorphosis_factory)
else:
self.use_real_metamorphosis_factory = True
return self
def with_dig_factory(self, dig_factory=None):
if dig_factory:
self.factories.append(dig_factory)
else:
self.use_real_dig_factory = True
return self
def with_trapping_factory(self, trapping_factory=None):
if trapping_factory:
self.factories.append(trapping_factory)
else:
self.use_real_trapping_factory = True
return self
def build(self):
"""
Build action factory
:returns: action factory
:rtype: ActionFactory
"""
if self.use_real_drink_factory:
self.factories.append((DrinkFactoryBuilder()
.with_effect_factory(self.effect_factory)
.build()))
if self.use_real_inventory_factory:
self.factories.append(InventoryFactory([PickUpFactory(),
DropFactory(),
EquipFactory(),
UnEquipFactory()]))
if self.use_real_spellcasting_factory:
self.factories.append(SpellCastingFactoryBuilder().build())
if self.use_real_gain_domain_factory:
self.factories.append(GainDomainFactoryBuilder().build())
if self.use_real_mitosis_factory:
self.factories.append(MitosisFactoryBuilder()
.build())
if self.use_real_metamorphosis_factory:
self.factories.append(MetamorphosisFactoryBuilder().build())
if self.use_real_dig_factory:
self.factories.append(DigFactoryBuilder().build())
if self.use_real_trapping_factory:
self.factories.append(TrappingFactoryBuilder().build())
action_factory = ActionFactory(self.model,
self.factories)
return action_factory
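# Illustrative use of the builder above (a sketch only; self.model is left as
# None here, as the test fixtures normally supply it):
#
#     factory = (ActionFactoryBuilder()
#                .with_drink_factory()
#                .with_inventory_factory()
#                .build())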
class DrinkFactoryBuilder():
"""
Class to build drink factories
"""
def __init__(self):
"""
Default constructor
"""
super().__init__()
self.effect_factory = None
def with_effect_factory(self, effect_factory):
"""
Set effect factory to use
"""
self.effect_factory = effect_factory
return self
def build(self):
"""
Builds drink factory
"""
return DrinkFactory(self.effect_factory)
class GainDomainFactoryBuilder():
"""
Builder for gain domain factory
    .. versionadded:: 0.10
"""
def __init__(self):
"""
Default constructor
"""
super().__init__()
def build(self):
"""
Builds the factory
"""
return GainDomainFactory()
class SpellCastingFactoryBuilder():
"""
Builder for spell casting factory
.. versionadded:: 0.9
"""
def __init__(self):
"""
Default constructor
"""
super().__init__()
self.spell_factory = None
self.use_real_spell_factory = False
self.effects_factory = None
self.use_real_effects_factory = False
def with_spell_factory(self, spell_factory=None):
"""
Configure spell factory to use
"""
if not spell_factory:
self.use_real_spell_factory = True
else:
if hasattr(spell_factory, 'build'):
self.spell_factory = spell_factory.build()
else:
self.spell_factory = spell_factory
return self
def with_effects_factory(self, effects_factory=None):
"""
Configure effects factory to use
"""
if effects_factory:
if hasattr(effects_factory, 'build'):
self.effects_factory = effects_factory.build()
else:
self.effects_factory = effects_factory
else:
self.use_real_effects_factory = True
return self
def build(self):
"""
Builds spell casting factory
"""
if self.use_real_spell_factory:
#self.spell_factory = None
pass
if self.use_real_effects_factory:
#self.effects_factory = None
pass
return SpellCastingFactory(spell_factory=self.spell_factory,
effects_factory=self.effects_factory)
class MitosisFactoryBuilder():
"""
Builder for mitosis factory
"""
def __init__(self):
"""
Default constructor
"""
super().__init__()
self.character_generator = None
self.character_limit = 30
self.rng = Random()
def with_character_limit(self, character_limit):
"""
Configure maximum amount of character at any given time
"""
self.character_limit = character_limit
return self
def with_character_generator(self, generator):
"""
Configure character generator to use
"""
self.character_generator = generator
return self
    def with_random_number_generator(self, rng):
        """
        Configure random number generator to use
        """
        self.rng = rng
        return self
def build(self):
"""
Builds mitosis factory
"""
return MitosisFactory(character_generator=self.character_generator,
character_limit=self.character_limit,
rng=self.rng)
class MetamorphosisFactoryBuilder():
"""
Builder for metamorphosis factory
"""
def __init__(self):
"""
Default constructor
"""
super().__init__()
self.character_generator = None
self.rng = Random()
def with_character_generator(self, generator):
"""
Configure character generator to use
"""
self.character_generator = generator
return self
def with_random_number_generator(self, rng):
"""
Configure random number generator to use
"""
self.rng = rng
return self
def build(self):
"""
Builds metamorphosis factory
"""
return MetamorphosisFactory(character_generator=self.character_generator,
rng=self.rng)
class DigFactoryBuilder():
"""
Builder for dig factory
"""
def __init__(self):
"""
Default constructor
"""
super().__init__()
self.rng = Random()
    def with_random_number_generator(self, rng):
"""
Configure random number generator to use
"""
self.rng = rng
return self
def build(self):
"""
Builds dig factory
"""
return DigFactory(self.rng)
class TrappingFactoryBuilder():
"""
Builder for trapping factory
"""
def __init__(self):
"""
Default constructor
"""
super().__init__()
self.trap_creator = None
def with_trap_creator(self, trap_creator):
"""
Configure used trap creator
"""
self.trap_creator = trap_creator
return self
def build(self):
"""
Builds trapping factory
"""
return TrappingFactory(self.trap_creator)
|
mit
| -5,472,398,915,691,913,000
| 27.821346
| 81
| 0.585815
| false
| 4.518734
| true
| false
| false
|
pivotal-energy-solutions/django-appsearch
|
demo_app/users/search.py
|
1
|
1036
|
# -*- coding: utf-8 -*-
"""search.py: Django """
import logging
from django.contrib.auth import get_user_model
from appsearch.registry import ModelSearch, search
__author__ = "Steven Klass"
__date__ = "08/07/2019 21:59"
__copyright__ = "Copyright 2011-2020 Pivotal Energy Solutions. All rights reserved."
__credits__ = [
"Artem Hruzd",
"Steven Klass",
]
log = logging.getLogger(__name__)
User = get_user_model()
class UserSearch(ModelSearch):
display_fields = (
("First", "first_name"),
("Last", "last_name"),
("Email", "email"),
("Work Phone", "work_phone"),
("Company", "company__name"),
("Active", "is_active"),
)
search_fields = (
("First", "first_name"),
("Last", "last_name"),
("Email", "email"),
{
"company": (
("Company Name", "name"),
("Company Type", "company_type"),
)
},
("Is Active", "is_active"),
)
search.register(User, UserSearch)
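# Note on the nested entry above (an assumption based on the "company__name"
# convention used in display_fields): grouping fields under the "company" key
# appears to be shorthand for searching across the related model, roughly
# equivalent to listing ("Company Name", "company__name") and
# ("Company Type", "company__company_type") directly.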
|
apache-2.0
| 7,934,878,455,402,386,000
| 20.142857
| 84
| 0.527027
| false
| 3.535836
| false
| false
| false
|
vipints/genomeutils
|
pairwise_distance/seq_alignment_run.py
|
1
|
2256
|
#!/usr/bin/env python
"""
Program to run different multiple sequence alignment programs.
Requirement:
mafft - http://mafft.cbrc.jp/alignment/server/index.html
clustalw2 - http://www.ebi.ac.uk/Tools/msa/clustalw2/
install these packages and set the path correctly if it is not a standard one.
"""
import os
import sys
import time
import tempfile
import subprocess
def MAFFTrun(infile, outfile, threads=2):
"""
mafft run command line
@args infile: fasta file with different genome sequence
@type infile: str
@args outfile: multiple sequence alignments are reported, example: CLUSTAL, PHYLIP, FASTA
@type outfile: str
@args threads: number of cores used for executing the program
@type threads: int
"""
outlog = "mafft_run-%s.log" % time.strftime("%Y_%m_%d_%H-%M-%S")
tlf = open(outlog, 'w')
# TODO: include more commandline features to mafft
out_order = "ALIGNED" # "INPUT" # ALIGNED
    # mafft has no -OUTPUT option; it writes the alignment to stdout, so redirect
    # stdout to the output file and keep only stderr in the run log
    cl = ['mafft --thread %d --threadit %d --reorder --anysymbol --auto %s > %s' % (threads, 0, infile, outfile)]
    process = subprocess.Popen(' '.join(cl), shell=True, stderr=tlf)
    returnval = process.wait()
    if returnval != 0:
        raise Exception("Exit status return code = %i" % returnval)
tlf.close()
def ClustalWrun(infile, outfile, data_type, outform):
"""
clustalw2 run
@args infile: fasta file with different genome sequence
@type infile: str
@args outfile: multiple sequence alignments are reported
@type outfile: str
@args data_type: DNA, PROTEIN
@type data_type: str
@args outform: CLUSTAL, PHYLIP, FASTA
@type outform: str
"""
outlog = "clustalw2_run-%s.log" % time.strftime("%Y_%m_%d_%H-%M-%S")
tlf = open(outlog,'w')
#TODO: include more commandline features to clustalw2
out_order = "ALIGNED" # "INPUT" # ALIGNED
cl = ['clustalw2 -INFILE=%s -OUTFILE=%s -OUTORDER=%s -TYPE=%s -OUTPUT=%s' % (infile, outfile, out_order, data_type, outform)]
process = subprocess.Popen(' '.join(cl), shell=True, stderr=tlf, stdout=tlf)
    rval = process.wait()
    if rval != 0:
        raise Exception("Exit status return code = %i" % rval)
tlf.close()
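# Example invocation (a sketch; the file names are hypothetical and mafft /
# clustalw2 must be on the PATH, as noted in the module docstring):
#
#     if __name__ == "__main__":
#         MAFFTrun("genomes.fa", "genomes_aligned.fa", threads=4)
#         ClustalWrun("genomes.fa", "genomes_aligned.aln", "DNA", "CLUSTAL")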
|
bsd-3-clause
| -536,085,624,397,585,540
| 29.90411
| 129
| 0.64805
| false
| 3.317647
| false
| false
| false
|
AntonioMtn/NZBMegaSearch
|
werkzeug/contrib/atom.py
|
1
|
14958
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from ..utils import escape
from ..wrappers import BaseResponse
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
def format_iso8601(obj):
"""Format a datetime object for iso8601"""
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
:param id: a globally unique id for the feed. Must be an URI. If
not present the `feed_url` is used, but one of both is
required.
:param updated: the time the feed was modified the last time. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
:param subtitle_type: the type attribute for the subtitle element.
                          One of ``'text'``, ``'html'``
                          or ``'xhtml'``. Default is ``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
:param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
default_generator = ('Werkzeug', None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.url = kwargs.get('url')
self.feed_url = kwargs.get('feed_url', self.url)
self.id = kwargs.get('id', self.feed_url)
self.updated = kwargs.get('updated')
self.author = kwargs.get('author', ())
self.icon = kwargs.get('icon')
self.logo = kwargs.get('logo')
self.rights = kwargs.get('rights')
self.rights_type = kwargs.get('rights_type')
self.subtitle = kwargs.get('subtitle')
self.subtitle_type = kwargs.get('subtitle_type', 'text')
self.generator = kwargs.get('generator')
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get('links', [])
self.entries = entries and list(entries) or []
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, (basestring, dict)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
for author in self.author:
if 'name' not in author:
raise TypeError('author must contain at least a name')
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs['feed_url'] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
def __repr__(self):
return '<%s %r (%d entries)>' % (
self.__class__.__name__,
self.title,
len(self.entries)
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if False in map(lambda e: bool(e.author), self.entries):
self.author = ({'name': 'Unknown author'},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates and dates[-1] or datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url, True)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % \
escape(self.feed_url, True)
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield ' <email>%s</email>\n' % escape(author['email'])
yield ' </author>\n'
if self.subtitle:
yield ' ' + _make_text_block('subtitle', self.subtitle,
self.subtitle_type)
if self.icon:
yield u' <icon>%s</icon>\n' % escape(self.icon)
if self.logo:
yield u' <logo>%s</logo>\n' % escape(self.logo)
if self.rights:
yield ' ' + _make_text_block('rights', self.rights,
self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
tmp = [u' <generator']
if generator_url:
tmp.append(u' uri="%s"' % escape(generator_url, True))
if generator_version:
tmp.append(u' version="%s"' % escape(generator_version, True))
tmp.append(u'>%s</generator>\n' % escape(generator_name))
yield u''.join(tmp)
for entry in self.entries:
for line in entry.generate():
yield u' ' + line
yield u'</feed>\n'
def to_string(self):
"""Convert the feed into a string."""
return u''.join(self.generate())
def get_response(self):
"""Return a response object for the feed."""
return BaseResponse(self.to_string(), mimetype='application/atom+xml')
def __call__(self, environ, start_response):
"""Use the class as WSGI response object."""
return self.get_response()(environ, start_response)
def __unicode__(self):
return self.to_string()
def __str__(self):
return self.to_string().encode('utf-8')
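# Sketch (not from the original documentation): because AtomFeed implements
# __call__ with the WSGI signature, a built feed can be used directly as a
# WSGI application; all values below are made up:
#
#     from datetime import datetime
#     feed = AtomFeed("Example", feed_url="http://example.com/feed.atom",
#                     url="http://example.com/", author="Jane Doe")
#     feed.add("Hello", "First post", content_type="text",
#              url="http://example.com/1", id="urn:example:1",
#              updated=datetime(2014, 1, 1), author="Jane Doe")
#     application = feed   # usable wherever a WSGI app is expected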
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
:param id: a globally unique id for the entry. Must be an URI. If
not present the URL is used, but one of both is required.
:param updated: the time the entry was modified the last time. Must
be a :class:`datetime.datetime` object. Required.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.content = content
self.content_type = kwargs.get('content_type', 'html')
self.url = kwargs.get('url')
self.id = kwargs.get('id', self.url)
self.updated = kwargs.get('updated')
self.summary = kwargs.get('summary')
self.summary_type = kwargs.get('summary_type', 'html')
self.author = kwargs.get('author')
self.published = kwargs.get('published')
self.rights = kwargs.get('rights')
self.links = kwargs.get('links', [])
self.xml_base = kwargs.get('xml_base', feed_url)
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, (basestring, dict)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
if not self.updated:
raise ValueError('updated is required')
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.title
)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base, True)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.published:
yield u' <published>%s</published>\n' % \
format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield u' <email>%s</email>\n' % escape(author['email'])
yield u' </author>\n'
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
if self.summary:
yield u' ' + _make_text_block('summary', self.summary,
self.summary_type)
if self.content:
yield u' ' + _make_text_block('content', self.content,
self.content_type)
yield u'</entry>\n'
def to_string(self):
"""Convert the feed item into a unicode object."""
return u''.join(self.generate())
def __unicode__(self):
return self.to_string()
def __str__(self):
return self.to_string().encode('utf-8')
|
gpl-2.0
| -2,882,186,171,540,563,000
| 42.609329
| 78
| 0.550809
| false
| 4.050366
| false
| false
| false
|
openmotics/gateway
|
src/gateway/migrations/orm/019_fix_user_foreign_keys.py
|
1
|
4180
|
# Copyright (C) 2021 OpenMotics BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from peewee import (
Model, Database, SqliteDatabase,
AutoField, CharField, IntegerField,
ForeignKeyField, BooleanField, FloatField,
TextField, SQL
)
from peewee_migrate import Migrator
import constants
if False: # MYPY
from typing import Dict, Any
def migrate(migrator, database, fake=False, **kwargs):
# type: (Migrator, Database, bool, Dict[Any, Any]) -> None
class BaseModel(Model):
class Meta:
database = SqliteDatabase(constants.get_gateway_database_file(),
pragmas={'foreign_keys': 1})
class Apartment(BaseModel):
id = AutoField(constraints=[SQL('AUTOINCREMENT')], unique=True)
name = CharField(null=False)
mailbox_rebus_id = IntegerField(unique=True)
doorbell_rebus_id = IntegerField(unique=True)
class User(BaseModel):
class UserRoles(object):
USER = 'USER'
ADMIN = 'ADMIN'
TECHNICIAN = 'TECHNICIAN'
COURIER = 'COURIER'
class UserLanguages(object):
EN = 'English'
DE = 'Deutsh'
NL = 'Nederlands'
FR = 'Francais'
id = AutoField(constraints=[SQL('AUTOINCREMENT')], unique=True)
username = CharField(null=False, unique=True)
first_name = CharField(null=True)
last_name = CharField(null=True)
        role = CharField(default=UserRoles.USER, null=False)  # options: USER, ADMIN, TECHNICIAN, COURIER
pin_code = CharField(null=True, unique=True)
language = CharField(null=False, default='English') # options: See Userlanguages
password = CharField()
apartment_id = ForeignKeyField(Apartment, null=True, default=None, backref='users', on_delete='SET NULL')
is_active = BooleanField(default=True)
accepted_terms = IntegerField(default=0)
class RFID(BaseModel):
id = AutoField(constraints=[SQL('AUTOINCREMENT')], unique=True)
tag_string = CharField(null=False, unique=True)
uid_manufacturer = CharField(null=False, unique=True)
uid_extension = CharField(null=True)
enter_count = IntegerField(null=False)
blacklisted = BooleanField(null=False, default=False)
label = CharField()
timestamp_created = CharField(null=False)
timestamp_last_used = CharField(null=True)
user_id = ForeignKeyField(User, null=False, backref='rfids', on_delete='CASCADE')
class Delivery(BaseModel):
class DeliveryType(object):
DELIVERY = 'DELIVERY'
RETURN = 'RETURN'
id = AutoField(constraints=[SQL('AUTOINCREMENT')], unique=True)
type = CharField(null=False) # options: DeliveryType
timestamp_delivery = CharField(null=False)
timestamp_pickup = CharField(null=True)
courier_firm = CharField(null=True)
signature_delivery = CharField(null=True)
signature_pickup = CharField(null=True)
parcelbox_rebus_id = IntegerField(null=False)
user_delivery = ForeignKeyField(User, backref='deliveries', on_delete='NO ACTION', null=True)
user_pickup = ForeignKeyField(User, backref='pickups', on_delete='NO ACTION', null=False)
migrator.drop_table(Delivery)
migrator.drop_table(RFID)
migrator.create_table(Delivery)
migrator.create_table(RFID)
def rollback(migrator, database, fake=False, **kwargs):
# type: (Migrator, Database, bool, Dict[Any, Any]) -> None
pass
|
agpl-3.0
| -7,860,201,654,605,407,000
| 38.065421
| 113
| 0.663158
| false
| 3.899254
| false
| false
| false
|
Pajn/RAXA-Django
|
RAXA/urls.py
|
1
|
1596
|
'''
Copyright (C) 2013 Rasmus Eneman <rasmus@eneman.eu>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from django.conf.urls import patterns, include, url
from django.conf.urls.i18n import i18n_patterns
from django.conf.urls.static import static
from RAXA.settings import MEDIA_URL, MEDIA_ROOT, INSTALLED_PLUGINS
urls = (url(r'^api/', include('api.urls')),
url(r'^backend/', include('backend.urls')),)
i18n_urls = (url(r'^', include('desktop.urls')),
url(r'^mobile/', include('mobile.urls')),
url(r'^common/', include('common.urls')),
url(r'^tablet/', include('tablet.urls')),)
for plugin in INSTALLED_PLUGINS:
try:
urls += (url(r'^', include('%s.urls' % plugin)),)
except ImportError:
pass
try:
i18n_urls += (url(r'^', include('%s.i18n_urls' % plugin)),)
except ImportError:
pass
urlpatterns = patterns('', *urls) + static(MEDIA_URL, document_root=MEDIA_ROOT)
urlpatterns += i18n_patterns('', *i18n_urls)
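# Sketch of what an entry in INSTALLED_PLUGINS is expected to provide (a
# hypothetical plugin called "myplugin"; not part of this project). The plugin
# needs a urls module, and may also ship an i18n_urls module that is included
# under the language-prefixed patterns above:
#
#     # myplugin/urls.py
#     from django.conf.urls import patterns, url
#     urlpatterns = patterns('',
#         url(r'^myplugin/$', 'myplugin.views.index'),
#     )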
|
agpl-3.0
| 5,589,374,140,238,752,000
| 37.02381
| 79
| 0.692982
| false
| 3.685912
| false
| false
| false
|
tuffery/Frog2
|
frowns/extensions/vflib/NetworkGraph/test.py
|
1
|
1347
|
from NetworkGraph.GraphObject import GraphObject
N = GraphObject()
print N.handle
M = GraphObject()
print M.handle
print N == M
print N is M
print N.handle == M.handle
from NetworkGraph.Graph import Graph
g = Graph()
from NetworkGraph.GraphObject import GraphNode
node1 = GraphNode()
node2 = GraphNode(label="blue")
print node1 == node2
g.add_node(node1)
g.add_node(node2)
print g.has_node(node1)
from NetworkGraph.GraphObject import GraphEdge
edge1 = GraphEdge(label="my dog has fleas")
g.add_edge(edge1, node1, node2)
print g.has_edge(edge1)
print edge1.nodes
n = edge1.xnode(node1)
print n is node2
print n.handle == node2.handle
g.dump()
g.remove_edge(edge1)
g.dump()
g.add_edge(edge1, node1, node2)
h = g.to_graph()
matcher = g.to_matcher()
results = matcher.match(h)
for nodes, edges in results:
print nodes
print edges
clone = g.clone()
for node in g.nodes:
assert not clone.has_node(node)
for original, cloned in zip(g.nodes, clone.nodes):
assert original == cloned
assert original is not cloned
node3 = GraphNode("I am a clone!")
edge2 = GraphEdge("new edge")
clone.add_node(node3)
n1 = clone.nodes[0]
clone.add_edge(edge2, node3, n1)
matchableClone = clone.to_graph()
results = matcher.umatch(matchableClone)
nodes, edges = results[0]
partialClone = clone.clone(ignoreNodes=nodes, ignoreEdges=edges)
partialClone.dump()
|
gpl-3.0
| -3,975,437,605,503,335,000
| 24.903846
| 64
| 0.743875
| false
| 2.915584
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure_py3.py
|
1
|
2005
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .catalog_item_py3 import CatalogItem
class USqlProcedure(CatalogItem):
"""A Data Lake Analytics catalog U-SQL procedure item.
:param compute_account_name: the name of the Data Lake Analytics account.
:type compute_account_name: str
:param version: the version of the catalog item.
:type version: str
:param database_name: the name of the database.
:type database_name: str
:param schema_name: the name of the schema associated with this procedure
and database.
:type schema_name: str
:param name: the name of the procedure.
:type name: str
:param definition: the defined query of the procedure.
:type definition: str
"""
_attribute_map = {
'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'database_name': {'key': 'databaseName', 'type': 'str'},
'schema_name': {'key': 'schemaName', 'type': 'str'},
'name': {'key': 'procName', 'type': 'str'},
'definition': {'key': 'definition', 'type': 'str'},
}
def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, definition: str=None, **kwargs) -> None:
super(USqlProcedure, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs)
self.database_name = database_name
self.schema_name = schema_name
self.name = name
self.definition = definition
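# Illustrative construction (a sketch; every value below is made up):
#
#     proc = USqlProcedure(
#         compute_account_name="myadlaaccount", version="1",
#         database_name="master", schema_name="dbo",
#         name="usp_example",
#         definition="CREATE PROCEDURE dbo.usp_example() AS BEGIN ... END;")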
|
mit
| -5,115,003,405,439,517,000
| 41.659574
| 181
| 0.611471
| false
| 4.050505
| false
| false
| false
|
singhdev/streamparse
|
streamparse/ipc.py
|
1
|
8967
|
"""
Utilities for interprocess communication between Python and Storm.
"""
from __future__ import absolute_import, print_function, unicode_literals
try:
import simplejson as json
except ImportError:
import json
import logging
import logging.handlers
import os
import sys
from collections import deque
from threading import RLock
from six import PY3
# Module globals
_PYTHON_LOG_LEVELS = {
'critical': logging.CRITICAL,
'error': logging.ERROR,
'warning': logging.WARNING,
'info': logging.INFO,
'debug': logging.DEBUG
}
_log = logging.getLogger('streamparse.ipc')
# pending commands/tuples we read while trying to read task IDs
_pending_commands = deque()
# pending task IDs we read while trying to read commands/tuples
_pending_task_ids = deque()
_pid = os.getpid()
_debug = False
_topology_name = _component_name = _task_id = _conf = _context = None
_reader_lock = RLock()
_writer_lock = RLock()
# Setup stdin line reader and stdout
if PY3:
# Ensure we don't fall back on the platform-dependent encoding and always
# use UTF-8 https://docs.python.org/3.4/library/sys.html#sys.stdin
import io
_readline = io.TextIOWrapper(sys.stdin.buffer,
encoding='utf-8').readline
else:
def _readline():
line = sys.stdin.readline()
return line.decode('utf-8')
_stdout = sys.stdout
# Travis CI has stdout set to an io.StringIO object instead of an
# io.BufferedWriter object which is what's actually used when streamparse is
# running
if hasattr(sys.stdout, 'buffer'):
_stdout = sys.stdout.buffer
else:
_stdout = sys.stdout
class LogStream(object):
"""Object that implements enough of the Python stream API to be used as
sys.stdout and sys.stderr. Messages are written to the Python logger.
"""
def __init__(self, logger):
self.logger = logger
def write(self, message):
if message.strip() == "":
return # skip blank lines
try:
self.logger.info(message)
except:
# There's been an issue somewhere in the logging sub-system
# so we'll put stderr and stdout back to their originals and
# raise the exception which will cause Storm to choke
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
raise
def flush(self):
"""No-op method to prevent crashes when someone does
sys.stdout.flush.
"""
pass
class Tuple(object):
"""Storm's primitive data type passed around via streams.
:ivar id: the ID of the tuple.
:type id: str
:ivar component: component that the tuple was generated from.
:type component: str
:ivar stream: the stream that the tuple was emitted into.
:type stream: str
:ivar task: the task the tuple was generated from.
:type task: int
:ivar values: the payload of the tuple where data is stored.
:type values: list
"""
__slots__ = ['id', 'component', 'stream', 'task', 'values']
def __init__(self, id, component, stream, task, values):
self.id = id
self.component = component
self.stream = stream
self.task = task
self.values = values
def __repr__(self):
return ('Tuple(id={!r}, component={!r}, stream={!r}, task={!r}, '
'values={!r})'
.format(self.id, self.component, self.stream, self.task,
self.values))
# Message receiving
def read_message():
"""Read a message from Storm, reconstruct newlines appropriately.
All of Storm's messages (for either Bolts or Spouts) should be of the form:
    '<command or task_id from prior emit>\nend\n'
Command example, an incoming tuple to a bolt:
'{ "id": "-6955786537413359385", "comp": "1", "stream": "1", "task": 9, "tuple": ["snow white and the seven dwarfs", "field2", 3]}\nend\n'
    Command example for a Spout to emit its next tuple:
'{"command": "next"}\nend\n'
Example, the task IDs a prior emit was sent to:
'[12, 22, 24]\nend\n'
The edge case of where we read '' from _readline indicating EOF, usually
means that communication with the supervisor has been severed.
"""
msg = ""
num_blank_lines = 0
while True:
        # readline will return trailing \n so that output is unambiguous, we
# should only have line == '' if we're at EOF
with _reader_lock:
line = _readline()
if line == 'end\n':
break
elif line == '':
_log.error("Received EOF while trying to read stdin from Storm, "
"pipe appears to be broken, exiting.")
sys.exit(1)
elif line == '\n':
num_blank_lines += 1
if num_blank_lines % 1000 == 0:
_log.warn("While trying to read a command or pending task ID, "
"Storm has instead sent {:,} '\\n' messages."
.format(num_blank_lines))
continue
msg = '{}{}\n'.format(msg, line[0:-1])
try:
return json.loads(msg)
except Exception:
_log.error("JSON decode error for message: %r", msg, exc_info=True)
raise
def read_task_ids():
if _pending_task_ids:
return _pending_task_ids.popleft()
else:
msg = read_message()
while not isinstance(msg, list):
_pending_commands.append(msg)
msg = read_message()
return msg
def read_command():
if _pending_commands:
return _pending_commands.popleft()
else:
msg = read_message()
while isinstance(msg, list):
_pending_task_ids.append(msg)
msg = read_message()
return msg
def read_tuple():
cmd = read_command()
return Tuple(cmd['id'], cmd['comp'], cmd['stream'], cmd['task'],
cmd['tuple'])
def read_handshake():
"""Read and process an initial handshake message from Storm."""
global _topology_name, _component_name, _task_id, _conf, _context, _debug
msg = read_message()
pid_dir, _conf, _context = msg['pidDir'], msg['conf'], msg['context']
# Write a blank PID file out to the pidDir
open('{}/{}'.format(pid_dir, str(_pid)), 'w').close()
send_message({'pid': _pid})
# Set up globals
_topology_name = _conf.get('topology.name', '')
_task_id = _context.get('taskid', '')
_component_name = _context.get('task->component', {}).get(str(_task_id), '')
_debug = _conf.get('topology.debug', False)
# Set up logging
log_path = _conf.get('streamparse.log.path')
if log_path:
root_log = logging.getLogger()
        max_bytes = _conf.get('streamparse.log.max_bytes', 1000000)  # 1 MB
backup_count = _conf.get('streamparse.log.backup_count', 10)
log_file = ('{log_path}/streamparse_{topology_name}_{component_name}_'
'{task_id}_{pid}.log'
.format(log_path=log_path, topology_name=_topology_name,
component_name=_component_name, task_id=_task_id,
pid=_pid))
handler = logging.handlers.RotatingFileHandler(log_file,
maxBytes=max_bytes,
backupCount=backup_count)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
root_log.addHandler(handler)
log_level = _conf.get('streamparse.log.level', 'info')
log_level = _PYTHON_LOG_LEVELS.get(log_level, logging.INFO)
if _debug:
# potentially override logging that was provided if topology.debug
# was set to true
log_level = logging.DEBUG
root_log.setLevel(log_level)
else:
send_message({
'command': 'log',
'msg': ('WARNING: streamparse logging is not configured. Please '
                    'set streamparse.log.path in your config.json.')})
# Redirect stdout and stderr to ensure that print statements/functions
# won't disrupt the multilang protocol
sys.stdout = LogStream(logging.getLogger('streamparse.stdout'))
sys.stderr = LogStream(logging.getLogger('streamparse.stderr'))
_log.info('Received initial handshake message from Storm\n%r', msg)
_log.info('Process ID (%d) sent to Storm', _pid)
return _conf, _context
# Message sending
def send_message(message):
"""Send a message to Storm via stdout."""
if not isinstance(message, dict):
_log.error("%s.%d attempted to send a non dict message to Storm: %r",
_component_name, _pid, message)
return
wrapped_msg = "{}\nend\n".format(json.dumps(message)).encode('utf-8')
with _writer_lock:
_stdout.flush()
_stdout.write(wrapped_msg)
_stdout.flush()
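# Minimal component loop sketch (illustrative only; real streamparse Spouts and
# Bolts wrap this module rather than calling it directly). The ack message
# follows the Storm multilang protocol used throughout this file:
#
#     conf, context = read_handshake()
#     while True:
#         tup = read_tuple()
#         # ... process tup.values ...
#         send_message({'command': 'ack', 'id': tup.id})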
|
apache-2.0
| 6,611,379,435,026,955,000
| 31.966912
| 143
| 0.597413
| false
| 3.880138
| false
| false
| false
|
beni55/pre-commit
|
setup.py
|
1
|
1759
|
from setuptools import find_packages
from setuptools import setup
setup(
name='pre_commit',
description=(
'A framework for managing and maintaining multi-language pre-commit '
'hooks.'
),
url='https://github.com/pre-commit/pre-commit',
version='0.5.5',
author='Anthony Sottile',
author_email='asottile@umich.edu',
platforms='linux',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=find_packages('.', exclude=('tests*', 'testing*')),
package_data={
'pre_commit': [
'resources/hook-tmpl',
'resources/pre-push-tmpl',
'resources/rbenv.tar.gz',
'resources/ruby-build.tar.gz',
'resources/ruby-download.tar.gz',
]
},
install_requires=[
'argparse',
'aspy.yaml',
'cached-property',
'jsonschema',
'nodeenv>=0.11.1',
'ordereddict',
'pyyaml',
'simplejson',
'virtualenv-hax',
],
entry_points={
'console_scripts': [
'pre-commit = pre_commit.main:main',
'pre-commit-validate-config = pre_commit.clientlib.validate_config:run', # noqa
'pre-commit-validate-manifest = pre_commit.clientlib.validate_manifest:run', # noqa
],
},
)
|
mit
| 4,702,655,874,889,281,000
| 29.327586
| 96
| 0.563388
| false
| 4.062356
| false
| false
| false
|
woopsi/newbie-app-install
|
src/main_functions.py
|
1
|
2603
|
import os,fnmatch
from gi.repository import Gtk, GObject,GdkPixbuf
def get_app_info_path(code):
home_path = os.path.expanduser('~/')
return{
1: home_path + ".ap_helper/info/1/",
2: home_path + ".ap_helper/info/2/",
3: home_path + ".ap_helper/info/3/",
4: home_path + ".ap_helper/info/4/",
5: home_path + ".ap_helper/info/5/",
6: home_path + ".ap_helper/info/6/",
7: home_path + ".ap_helper/info/7/"
}[code]
def get_app_installer_path(code):
home_path = os.path.expanduser('~/')
return{
1: home_path + ".ap_helper/installers/1/",
2: home_path + ".ap_helper/installers/2/",
3: home_path + ".ap_helper/installers/3/",
4: home_path + ".ap_helper/installers/4/",
5: home_path + ".ap_helper/installers/5/",
6: home_path + ".ap_helper/installers/6/",
7: home_path + ".ap_helper/installers/7/"
}[code]
def get_icon_path(category):
home_path = os.path.expanduser('~/')
return{
1: home_path + ".ap_helper/logos/app_logos/1/",
2: home_path + ".ap_helper/logos/app_logos/2/",
3: home_path + ".ap_helper/logos/app_logos/3/",
4: home_path + ".ap_helper/logos/app_logos/4/",
5: home_path + ".ap_helper/logos/app_logos/5/",
6: home_path + ".ap_helper/logos/app_logos/6/",
7: home_path + ".ap_helper/logos/app_logos/7/",
8: home_path + ".ap_helper/logos/main_logos/"
}[category]
def get_app_logo(category,number):
path = get_icon_path(category)
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(filename=path+ str(number) +'.png',width=50,height=50, preserve_aspect_ratio=True)
img = Gtk.Image.new_from_pixbuf(pixbuf)
return img
def destroy_window(self,window):
window.destroy()
def create_back_button():
button = Gtk.Button("Back")
button.set_always_show_image(True)
path = get_icon_path(8)
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(filename=path+"back.png",width=50,height=50, preserve_aspect_ratio=True)
img = Gtk.Image.new_from_pixbuf(pixbuf)
button.set_image(img)
button.set_size_request(500,50)
return button
def get_app_number(category):
num = len(fnmatch.filter(os.listdir(get_icon_path(category)), '*.png'))
return num
def get_application_names(category):
    path = get_icon_path(category)
    category = str(category)
    # one application name per line, e.g. ~/.ap_helper/logos/app_logos/<n>/<n>.txt
    with open(path + category + ".txt") as name_file:
        names_list = [x.strip() for x in name_file.readlines()]
    return names_list
|
gpl-3.0
| -1,004,511,021,433,330,200
| 33.706667
| 135
| 0.608144
| false
| 2.911633
| false
| false
| false
|
hugo-lorenzo-mato/meteo-galicia-db
|
django/www/MeteoGaliciaDB/registros/views.py
|
1
|
2191
|
from django.shortcuts import render
from . import forms
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate,login,logout
# Create your views here.
@login_required
def special(request):
return HttpResponse("Estás logueado!")
@login_required
def user_logout(request):
logout(request)
logged = False
return render(request, 'registros/login.html',{'logged':logged})
def registro(request):
registered = False
if request.method == 'POST':
user_form = forms.UserForm(data=request.POST)
profile_form = forms.UserProfileInfo(data=request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
profile = profile_form.save(commit=False)
profile.user = user
if 'profile_pic' in request.FILES:
profile.profile_pic = request.FILES['profile_pic']
profile.save()
registered = True
else:
print(user_form.errors,profile_form.errors)
else:
user_form = forms.UserForm()
profile_form = forms.UserProfileInfo()
return render(request,'registros/registration.html', {'registered': registered, 'user_form': user_form,'profile_form':profile_form})
def user_login(request):
logged = False
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
logged = True
return render(request, 'registros/login.html', {'logged': logged})
else:
return HttpResponse("Cuenta inactiva")
else:
print("Alguien intento loguearse y falló")
return HttpResponse("Datos de acceso inválidos")
else:
return render(request, 'registros/login.html',{'logged':logged})
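# Example URL wiring for the views above (a sketch; the route names and the
# project's actual urls.py layout are assumptions):
#
#     from django.conf.urls import url
#     from registros import views
#
#     urlpatterns = [
#         url(r'^registro/$', views.registro, name='registro'),
#         url(r'^login/$', views.user_login, name='user_login'),
#         url(r'^logout/$', views.user_logout, name='user_logout'),
#     ]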
|
mit
| -4,794,422,289,503,203,000
| 32.166667
| 136
| 0.640311
| false
| 4.183556
| false
| false
| false
|
dstufft/warehouse
|
warehouse/csrf.py
|
1
|
2444
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyramid.httpexceptions import HTTPMethodNotAllowed
from pyramid.viewderivers import INGRESS, csrf_view
SAFE_METHODS = {"GET", "HEAD", "OPTIONS"}
def require_method_view(view, info):
require_methods = info.options.get("require_methods", SAFE_METHODS)
explicit = bool(info.options.get("require_methods"))
# Support @view_config(require_methods=False) to disable this view deriver.
if not require_methods:
return view
def wrapped(context, request):
# If the current request is using an unallowed method then we'll reject
# it *UNLESS* it is an exception view, then we'll allow it again
# *UNLESS* the exception view set an explicit require_methods itself.
if request.method not in require_methods and (
getattr(request, "exception", None) is None or explicit
):
raise HTTPMethodNotAllowed(
headers={"Allow": ", ".join(sorted(require_methods))}
)
return view(context, request)
return wrapped
require_method_view.options = {"require_methods"}
def includeme(config):
# Turn on all of our CSRF checks by default.
config.set_default_csrf_options(require_csrf=True)
# We want to shuffle things around so that the csrf_view comes over the
# secured_view because we do not want to access the ambient authority
# provided by the session cookie without first checking to ensure that this
# is not a cross-site request.
config.add_view_deriver(csrf_view, under=INGRESS, over="secured_view")
# We also want to add a view deriver that will ensure that only allowed
# methods get called on particular views. This needs to happen prior to
# the CSRF checks happening to prevent the CSRF checks from firing on
# views that don't expect them to.
config.add_view_deriver(require_method_view, under=INGRESS, over="csrf_view")
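# Illustrative view registrations showing how the deriver above is driven by
# view options (a sketch, not taken from this codebase):
#
#     from pyramid.view import view_config
#
#     @view_config(route_name="project.submit", require_methods=["POST"])
#     def submit(request): ...
#
#     @view_config(route_name="health", require_methods=False)  # opt out of the check
#     def health(request): ...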
|
apache-2.0
| 1,147,295,129,712,310,300
| 39.065574
| 81
| 0.713584
| false
| 4.100671
| false
| false
| false
|
mathemage/h2o-3
|
h2o-py/h2o/model/metrics_base.py
|
1
|
25978
|
# -*- encoding: utf-8 -*-
"""
Regression model.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import imp
from h2o.model.confusion_matrix import ConfusionMatrix
from h2o.utils.backward_compatibility import backwards_compatible
from h2o.utils.compatibility import * # NOQA
from h2o.utils.typechecks import assert_is_type, assert_satisfies, numeric
class MetricsBase(backwards_compatible()):
"""
A parent class to house common metrics available for the various Metrics types.
The methods here are available across different model categories.
"""
def __init__(self, metric_json, on=None, algo=""):
super(MetricsBase, self).__init__()
# Yep, it's messed up...
if isinstance(metric_json, MetricsBase): metric_json = metric_json._metric_json
self._metric_json = metric_json
# train and valid and xval are not mutually exclusive -- could have a test. train and
# valid only make sense at model build time.
self._on_train = False
self._on_valid = False
self._on_xval = False
self._algo = algo
if on == "training_metrics":
self._on_train = True
elif on == "validation_metrics":
self._on_valid = True
elif on == "cross_validation_metrics":
self._on_xval = True
elif on is None:
pass
else:
raise ValueError("on expected to be train,valid,or xval. Got: " + str(on))
@classmethod
def make(cls, kvs):
"""Factory method to instantiate a MetricsBase object from the list of key-value pairs."""
return cls(metric_json=dict(kvs))
def __repr__(self):
# FIXME !!! __repr__ should never print anything, but return a string
self.show()
return ""
# TODO: convert to actual fields list
def __getitem__(self, key):
return self._metric_json.get(key)
@staticmethod
def _has(dictionary, key):
return key in dictionary and dictionary[key] is not None
def show(self):
"""Display a short summary of the metrics."""
metric_type = self._metric_json['__meta']['schema_type']
types_w_glm = ['ModelMetricsRegressionGLM', 'ModelMetricsBinomialGLM']
types_w_clustering = ['ModelMetricsClustering']
types_w_mult = ['ModelMetricsMultinomial']
types_w_bin = ['ModelMetricsBinomial', 'ModelMetricsBinomialGLM']
types_w_r2 = ['ModelMetricsRegressionGLM']
types_w_mean_residual_deviance = ['ModelMetricsRegressionGLM', 'ModelMetricsRegression']
types_w_mean_absolute_error = ['ModelMetricsRegressionGLM', 'ModelMetricsRegression']
types_w_logloss = types_w_bin + types_w_mult
types_w_dim = ["ModelMetricsGLRM"]
print()
print(metric_type + ": " + self._algo)
reported_on = "** Reported on {} data. **"
if self._on_train:
print(reported_on.format("train"))
elif self._on_valid:
print(reported_on.format("validation"))
elif self._on_xval:
print(reported_on.format("cross-validation"))
else:
print(reported_on.format("test"))
print()
print("MSE: " + str(self.mse()))
print("RMSE: " + str(self.rmse()))
if metric_type in types_w_mean_absolute_error:
print("MAE: " + str(self.mae()))
print("RMSLE: " + str(self.rmsle()))
if metric_type in types_w_r2:
print("R^2: " + str(self.r2()))
if metric_type in types_w_mean_residual_deviance:
print("Mean Residual Deviance: " + str(self.mean_residual_deviance()))
if metric_type in types_w_logloss:
print("LogLoss: " + str(self.logloss()))
if metric_type == 'ModelMetricsBinomial':
# second element for first threshold is the actual mean per class error
print("Mean Per-Class Error: %s" % self.mean_per_class_error()[0][1])
if metric_type == 'ModelMetricsMultinomial':
print("Mean Per-Class Error: " + str(self.mean_per_class_error()))
if metric_type in types_w_glm:
print("Null degrees of freedom: " + str(self.null_degrees_of_freedom()))
print("Residual degrees of freedom: " + str(self.residual_degrees_of_freedom()))
print("Null deviance: " + str(self.null_deviance()))
print("Residual deviance: " + str(self.residual_deviance()))
print("AIC: " + str(self.aic()))
if metric_type in types_w_bin:
print("AUC: " + str(self.auc()))
print("Gini: " + str(self.gini()))
self.confusion_matrix().show()
self._metric_json["max_criteria_and_metric_scores"].show()
if self.gains_lift():
print(self.gains_lift())
if metric_type in types_w_mult:
self.confusion_matrix().show()
self.hit_ratio_table().show()
if metric_type in types_w_clustering:
print("Total Within Cluster Sum of Square Error: " + str(self.tot_withinss()))
print("Total Sum of Square Error to Grand Mean: " + str(self.totss()))
print("Between Cluster Sum of Square Error: " + str(self.betweenss()))
self._metric_json['centroid_stats'].show()
if metric_type in types_w_dim:
print("Sum of Squared Error (Numeric): " + str(self.num_err()))
print("Misclassification Error (Categorical): " + str(self.cat_err()))
def r2(self):
"""The R squared coefficient."""
return self._metric_json["r2"]
def logloss(self):
"""Log loss."""
return self._metric_json["logloss"]
def nobs(self):
"""The number of observations."""
return self._metric_json["nobs"]
def mean_residual_deviance(self):
"""The mean residual deviance for this set of metrics."""
return self._metric_json["mean_residual_deviance"]
def auc(self):
"""The AUC for this set of metrics."""
return self._metric_json['AUC']
def aic(self):
"""The AIC for this set of metrics."""
return self._metric_json['AIC']
def gini(self):
"""Gini coefficient."""
return self._metric_json['Gini']
def mse(self):
"""The MSE for this set of metrics."""
return self._metric_json['MSE']
def rmse(self):
"""The RMSE for this set of metrics."""
return self._metric_json['RMSE']
def mae(self):
"""The MAE for this set of metrics."""
return self._metric_json['mae']
def rmsle(self):
"""The RMSLE for this set of metrics."""
return self._metric_json['rmsle']
def residual_deviance(self):
"""The residual deviance if the model has it, otherwise None."""
if MetricsBase._has(self._metric_json, "residual_deviance"):
return self._metric_json["residual_deviance"]
return None
def residual_degrees_of_freedom(self):
"""The residual DoF if the model has residual deviance, otherwise None."""
if MetricsBase._has(self._metric_json, "residual_degrees_of_freedom"):
return self._metric_json["residual_degrees_of_freedom"]
return None
def null_deviance(self):
"""The null deviance if the model has residual deviance, otherwise None."""
if MetricsBase._has(self._metric_json, "null_deviance"):
return self._metric_json["null_deviance"]
return None
def null_degrees_of_freedom(self):
"""The null DoF if the model has residual deviance, otherwise None."""
if MetricsBase._has(self._metric_json, "null_degrees_of_freedom"):
return self._metric_json["null_degrees_of_freedom"]
return None
def mean_per_class_error(self):
"""The mean per class error."""
return self._metric_json['mean_per_class_error']
# Deprecated functions; left here for backward compatibility
_bcim = {
"giniCoef": lambda self, *args, **kwargs: self.gini(*args, **kwargs)
}
class H2ORegressionModelMetrics(MetricsBase):
"""
This class provides an API for inspecting the metrics returned by a regression model.
It is possible to retrieve the R^2 (1 - MSE/variance) and MSE.
"""
def __init__(self, metric_json, on=None, algo=""):
super(H2ORegressionModelMetrics, self).__init__(metric_json, on, algo)
class H2OClusteringModelMetrics(MetricsBase):
def __init__(self, metric_json, on=None, algo=""):
super(H2OClusteringModelMetrics, self).__init__(metric_json, on, algo)
def tot_withinss(self):
"""The Total Within Cluster Sum-of-Square Error, or None if not present."""
if MetricsBase._has(self._metric_json, "tot_withinss"):
return self._metric_json["tot_withinss"]
return None
def totss(self):
"""The Total Sum-of-Square Error to Grand Mean, or None if not present."""
if MetricsBase._has(self._metric_json, "totss"):
return self._metric_json["totss"]
return None
def betweenss(self):
"""The Between Cluster Sum-of-Square Error, or None if not present."""
if MetricsBase._has(self._metric_json, "betweenss"):
return self._metric_json["betweenss"]
return None
class H2OMultinomialModelMetrics(MetricsBase):
def __init__(self, metric_json, on=None, algo=""):
super(H2OMultinomialModelMetrics, self).__init__(metric_json, on, algo)
def confusion_matrix(self):
"""Returns a confusion matrix based of H2O's default prediction threshold for a dataset."""
return self._metric_json['cm']['table']
def hit_ratio_table(self):
"""Retrieve the Hit Ratios."""
return self._metric_json['hit_ratio_table']
class H2OBinomialModelMetrics(MetricsBase):
"""
This class is essentially an API for the AUC object.
This class contains methods for inspecting the AUC for different criteria.
To input the different criteria, use the static variable `criteria`.
"""
def __init__(self, metric_json, on=None, algo=""):
"""
Create a new Binomial Metrics object (essentially a wrapper around some json)
        :param metric_json: A blob of json holding all of the needed information.
        :param on: Which metrics these are; one of "training_metrics", "validation_metrics",
            "cross_validation_metrics", or None.
        :param algo: The algorithm the metrics are based off of (e.g. deeplearning, gbm, etc.)
        :returns: A new H2OBinomialModelMetrics object.
"""
super(H2OBinomialModelMetrics, self).__init__(metric_json, on, algo)
def F1(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: The F1 for the given set of thresholds.
"""
return self.metric("f1", thresholds=thresholds)
def F2(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: The F2 for this set of metrics and thresholds.
"""
return self.metric("f2", thresholds=thresholds)
def F0point5(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: The F0.5 for this set of metrics and thresholds.
"""
return self.metric("f0point5", thresholds=thresholds)
def accuracy(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: The accuracy for this set of metrics and thresholds.
"""
return self.metric("accuracy", thresholds=thresholds)
def error(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: The error for this set of metrics and thresholds.
"""
return 1 - self.metric("accuracy", thresholds=thresholds)
def precision(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: The precision for this set of metrics and thresholds.
"""
return self.metric("precision", thresholds=thresholds)
def tpr(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
        :returns: The True Positive Rate.
"""
return self.metric("tpr", thresholds=thresholds)
def tnr(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: The True Negative Rate.
"""
return self.metric("tnr", thresholds=thresholds)
def fnr(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: The False Negative Rate.
"""
return self.metric("fnr", thresholds=thresholds)
def fpr(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: The False Positive Rate.
"""
return self.metric("fpr", thresholds=thresholds)
def recall(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: Recall for this set of metrics and thresholds.
"""
return self.metric("tpr", thresholds=thresholds)
def sensitivity(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: Sensitivity or True Positive Rate for this set of metrics and thresholds.
"""
return self.metric("tpr", thresholds=thresholds)
def fallout(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: The fallout (same as False Positive Rate) for this set of metrics and thresholds.
"""
return self.metric("fpr", thresholds=thresholds)
def missrate(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
        :returns: The miss rate (same as False Negative Rate).
"""
return self.metric("fnr", thresholds=thresholds)
def specificity(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: The specificity (same as True Negative Rate).
"""
return self.metric("tnr", thresholds=thresholds)
def mcc(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: The absolute MCC (a value between 0 and 1, 0 being totally dissimilar, 1 being identical).
"""
return self.metric("absolute_mcc", thresholds=thresholds)
def max_per_class_error(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
        :returns: 1 - min(per-class accuracy).
"""
return 1 - self.metric("min_per_class_accuracy", thresholds=thresholds)
def mean_per_class_error(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: mean per class error.
"""
return [[x[0], 1 - x[1]] for x in self.metric("mean_per_class_accuracy", thresholds=thresholds)]
def metric(self, metric, thresholds=None):
"""
:param str metric: The desired metric.
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then
the thresholds in this set of metrics will be used.
:returns: The set of metrics for the list of thresholds.
"""
assert_is_type(thresholds, None, [numeric])
if not thresholds: thresholds = [self.find_threshold_by_max_metric(metric)]
thresh2d = self._metric_json['thresholds_and_metric_scores']
metrics = []
for t in thresholds:
idx = self.find_idx_by_threshold(t)
metrics.append([t, thresh2d[metric][idx]])
return metrics
def plot(self, type="roc", server=False):
"""
Produce the desired metric plot.
:param type: the type of metric plot (currently, only ROC supported).
:param server: if True, generate plot inline using matplotlib's "Agg" backend.
:returns: None
"""
# TODO: add more types (i.e. cutoffs)
assert_is_type(type, "roc")
# check for matplotlib. exit if absent.
try:
imp.find_module('matplotlib')
import matplotlib
if server: matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
except ImportError:
print("matplotlib is required for this function!")
return
if type == "roc":
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('ROC Curve')
plt.text(0.5, 0.5, r'AUC={0:.4f}'.format(self._metric_json["AUC"]))
plt.plot(self.fprs, self.tprs, 'b--')
plt.axis([0, 1, 0, 1])
if not server: plt.show()
@property
def fprs(self):
"""
Return all false positive rates for all threshold values.
:returns: a list of false positive rates.
"""
return self._metric_json["thresholds_and_metric_scores"]["fpr"]
@property
def tprs(self):
"""
Return all true positive rates for all threshold values.
:returns: a list of true positive rates.
"""
return self._metric_json["thresholds_and_metric_scores"]["tpr"]
def confusion_matrix(self, metrics=None, thresholds=None):
"""
Get the confusion matrix for the specified metric
        :param metrics: A string (or list of strings) in {"min_per_class_accuracy", "absolute_mcc", "precision",
            "recall", "specificity", "accuracy", "f0point5", "f2", "f1", "mean_per_class_accuracy"}
:param thresholds: A value (or list of values) between 0 and 1
:returns: a list of ConfusionMatrix objects (if there are more than one to return), or a single ConfusionMatrix
(if there is only one).
"""
# make lists out of metrics and thresholds arguments
if metrics is None and thresholds is None: metrics = ["f1"]
if isinstance(metrics, list):
metrics_list = metrics
elif metrics is None:
metrics_list = []
else:
metrics_list = [metrics]
if isinstance(thresholds, list):
thresholds_list = thresholds
elif thresholds is None:
thresholds_list = []
else:
thresholds_list = [thresholds]
# error check the metrics_list and thresholds_list
assert_is_type(thresholds_list, [numeric])
assert_satisfies(thresholds_list, all(0 <= t <= 1 for t in thresholds_list))
if not all(m.lower() in ["min_per_class_accuracy", "absolute_mcc", "precision", "recall", "specificity",
"accuracy", "f0point5", "f2", "f1", "mean_per_class_accuracy"] for m in metrics_list):
raise ValueError(
"The only allowable metrics are min_per_class_accuracy, absolute_mcc, precision, accuracy, f0point5, "
"f2, f1, mean_per_class_accuracy")
# make one big list that combines the thresholds and metric-thresholds
metrics_thresholds = [self.find_threshold_by_max_metric(m) for m in metrics_list]
for mt in metrics_thresholds:
thresholds_list.append(mt)
thresh2d = self._metric_json['thresholds_and_metric_scores']
actual_thresholds = [float(e[0]) for i, e in enumerate(thresh2d.cell_values)]
cms = []
for t in thresholds_list:
idx = self.find_idx_by_threshold(t)
row = thresh2d.cell_values[idx]
tns = row[11]
fns = row[12]
fps = row[13]
tps = row[14]
p = tps + fns
n = tns + fps
c0 = n - fps
c1 = p - tps
if t in metrics_thresholds:
m = metrics_list[metrics_thresholds.index(t)]
table_header = "Confusion Matrix (Act/Pred) for max " + m + " @ threshold = " + str(
actual_thresholds[idx])
else:
table_header = "Confusion Matrix (Act/Pred) @ threshold = " + str(actual_thresholds[idx])
cms.append(ConfusionMatrix(cm=[[c0, fps], [c1, tps]], domains=self._metric_json['domain'],
table_header=table_header))
if len(cms) == 1:
return cms[0]
else:
return cms
def find_threshold_by_max_metric(self, metric):
"""
:param metric: A string in {"min_per_class_accuracy", "absolute_mcc", "precision", "recall", "specificity",
"accuracy", "f0point5", "f2", "f1", "mean_per_class_accuracy"}.
:returns: the threshold at which the given metric is maximal.
"""
crit2d = self._metric_json['max_criteria_and_metric_scores']
for e in crit2d.cell_values:
if e[0] == "max " + metric.lower():
return e[1]
raise ValueError("No metric " + str(metric.lower()))
def find_idx_by_threshold(self, threshold):
"""
Retrieve the index in this metric's threshold list at which the given threshold is located.
:param threshold: Find the index of this input threshold.
:returns: the index
:raises ValueError: if no such index can be found.
"""
assert_is_type(threshold, numeric)
thresh2d = self._metric_json['thresholds_and_metric_scores']
for i, e in enumerate(thresh2d.cell_values):
t = float(e[0])
if abs(t - threshold) < 0.00000001 * max(t, threshold):
return i
if threshold >= 0 and threshold <= 1:
thresholds = [float(e[0]) for i, e in enumerate(thresh2d.cell_values)]
threshold_diffs = [abs(t - threshold) for t in thresholds]
closest_idx = threshold_diffs.index(min(threshold_diffs))
closest_threshold = thresholds[closest_idx]
print("Could not find exact threshold {0}; using closest threshold found {1}."
.format(threshold, closest_threshold))
return closest_idx
raise ValueError("Threshold must be between 0 and 1, but got {0} ".format(threshold))
def gains_lift(self):
"""Retrieve the Gains/Lift table."""
if 'gains_lift_table' in self._metric_json:
return self._metric_json['gains_lift_table']
return None
class H2OAutoEncoderModelMetrics(MetricsBase):
def __init__(self, metric_json, on=None, algo=""):
super(H2OAutoEncoderModelMetrics, self).__init__(metric_json, on, algo)
class H2ODimReductionModelMetrics(MetricsBase):
def __init__(self, metric_json, on=None, algo=""):
super(H2ODimReductionModelMetrics, self).__init__(metric_json, on, algo)
def num_err(self):
"""Sum of Squared Error over non-missing numeric entries, or None if not present."""
if MetricsBase._has(self._metric_json, "numerr"):
return self._metric_json["numerr"]
return None
def cat_err(self):
"""The Number of Misclassified categories over non-missing categorical entries, or None if not present."""
if MetricsBase._has(self._metric_json, "caterr"):
return self._metric_json["caterr"]
return None
class H2OWordEmbeddingModelMetrics(MetricsBase):
def __init__(self, metric_json, on=None, algo=""):
super(H2OWordEmbeddingModelMetrics, self).__init__(metric_json, on, algo)
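# Hedged usage sketch (editor addition, not part of the H2O source): a small
# helper showing how the binomial metrics API above is typically driven.  The
# ``perf`` argument is assumed to be an H2OBinomialModelMetrics instance, for
# example the value returned by ``model.model_performance(test_data)``.
def _example_binomial_summary(perf):
    """Print a few headline numbers from a binomial metrics object."""
    best_f1_threshold = perf.find_threshold_by_max_metric("f1")
    print("AUC: %s" % perf.auc())
    print("LogLoss: %s" % perf.logloss())
    print("Threshold maximizing F1: %s" % best_f1_threshold)
    print("F1 at that threshold: %s" % perf.F1(thresholds=[best_f1_threshold]))
    # Confusion matrix at the max-F1 threshold (the default behaviour).
    print(perf.confusion_matrix(metrics="f1"))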
|
apache-2.0
| 4,609,477,976,568,163,300
| 36.758721
| 120
| 0.604319
| false
| 3.928323
| false
| false
| false
|
pegasus-isi/pegasus
|
share/pegasus/examples/grid-blackdiamond-python/blackdiamond.py
|
1
|
2722
|
#!/usr/bin/env python3
from Pegasus.DAX3 import *
import sys
import os
if len(sys.argv) != 2:
print "Usage: %s PEGASUS_HOME" % (sys.argv[0])
sys.exit(1)
# Create an abstract dag
diamond = ADAG("diamond")
# Add input file to the DAX-level replica catalog
a = File("f.a")
a.addPFN(PFN("file://" + os.getcwd() + "/f.a", "local"))
diamond.addFile(a)
# Add executables to the DAX-level replica catalog
# In this case the binary is keg, which is shipped with Pegasus, so we use
# the remote PEGASUS_HOME to build the path.
e_preprocess = Executable(namespace="diamond", name="preprocess", version="4.0", os="linux", arch="x86_64", installed=True)
e_preprocess.addPFN(PFN("file://" + sys.argv[1] + "/bin/pegasus-keg", "TestCluster"))
diamond.addExecutable(e_preprocess)
e_findrange = Executable(namespace="diamond", name="findrange", version="4.0", os="linux", arch="x86_64", installed=True)
e_findrange.addPFN(PFN("file://" + sys.argv[1] + "/bin/pegasus-keg", "TestCluster"))
diamond.addExecutable(e_findrange)
e_analyze = Executable(namespace="diamond", name="analyze", version="4.0", os="linux", arch="x86_64", installed=True)
e_analyze.addPFN(PFN("file://" + sys.argv[1] + "/bin/pegasus-keg", "TestCluster"))
diamond.addExecutable(e_analyze)
# Add a preprocess job
preprocess = Job(namespace="diamond", name="preprocess", version="4.0")
b1 = File("f.b1")
b2 = File("f.b2")
preprocess.addArguments("-a preprocess","-T60","-i",a,"-o",b1,b2)
preprocess.uses(a, link=Link.INPUT)
preprocess.uses(b1, link=Link.OUTPUT)
preprocess.uses(b2, link=Link.OUTPUT)
diamond.addJob(preprocess)
# Add left Findrange job
frl = Job(namespace="diamond", name="findrange", version="4.0")
c1 = File("f.c1")
frl.addArguments("-a findrange","-T60","-i",b1,"-o",c1)
frl.uses(b1, link=Link.INPUT)
frl.uses(c1, link=Link.OUTPUT)
diamond.addJob(frl)
# Add right Findrange job
frr = Job(namespace="diamond", name="findrange", version="4.0")
c2 = File("f.c2")
frr.addArguments("-a findrange","-T60","-i",b2,"-o",c2)
frr.uses(b2, link=Link.INPUT)
frr.uses(c2, link=Link.OUTPUT)
diamond.addJob(frr)
# Add Analyze job
analyze = Job(namespace="diamond", name="analyze", version="4.0")
d = File("f.d")
analyze.addArguments("-a analyze","-T60","-i",c1,c2,"-o",d)
analyze.uses(c1, link=Link.INPUT)
analyze.uses(c2, link=Link.INPUT)
analyze.uses(d, link=Link.OUTPUT, register=True)
diamond.addJob(analyze)
# Add control-flow dependencies
diamond.addDependency(Dependency(parent=preprocess, child=frl))
diamond.addDependency(Dependency(parent=preprocess, child=frr))
diamond.addDependency(Dependency(parent=frl, child=analyze))
diamond.addDependency(Dependency(parent=frr, child=analyze))
# Write the DAX to stdout
diamond.writeXML(sys.stdout)
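# Hedged usage note (editor addition, not part of the original example): the
# script expects PEGASUS_HOME as its single argument and writes the abstract
# workflow to stdout, so it is typically invoked along the lines of
#   python3 blackdiamond.py /path/to/pegasus > diamond.dax
# where /path/to/pegasus is an illustrative placeholder.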
|
apache-2.0
| 2,515,423,209,064,240,600
| 33.455696
| 123
| 0.713446
| false
| 2.69505
| false
| true
| false
|
kHarshit/DAT210x_Microsoft
|
Module2/sunspots.py
|
1
|
1053
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('Datasets/SN_d_tot_V2.0.csv')
# print(df)
""" CSV file doesn't have column labels
Column 1-3: Gregorian calendar date
Column 4: Date in fraction of year
Column 5: Daily total sunspot number. A value of -1 indicates that no number is available for that day (missing value).
Column 6: Daily standard deviation of the input sunspot numbers from individual stations.
Column 7: Number of observations used to compute the daily value.
Column 8: Definitive/provisional indicator."""
file_path = 'Datasets/SN_d_tot_V2.0.csv'
col_names = ['year', 'month', 'day', 'dec_date', 'sunspots/day', 'std_dev', 'no_of_obs', 'indicator']
sunspots = pd.read_csv(file_path, sep=';', header=None, names=col_names, na_values={'std_dev': [' -1']}, parse_dates=[[0, 1, 2]])
# header=None prevents pandas from assuming the first line of the file gives header labels.
print(sunspots.iloc[10:20, :])
# print(sunspots.info())
# sunspots.to_csv('sunspots.csv', sep='\t') # .to_excel()
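# Hedged continuation (editor addition, not in the original script): the numpy
# and matplotlib imports above are otherwise unused, so a minimal plotting
# sketch is left here commented out; filtering 'sunspots/day' >= 0 to drop the
# -1 missing-value placeholder is an assumption based on the column notes above.
# valid = sunspots[sunspots['sunspots/day'] >= 0]
# plt.plot(valid['dec_date'], valid['sunspots/day'], lw=0.3)
# plt.xlabel('Year (decimal date)')
# plt.ylabel('Daily total sunspot number')
# plt.show()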
|
mit
| -5,525,578,550,921,758,000
| 45.863636
| 129
| 0.702754
| false
| 3.025862
| false
| false
| false
|
semenzato/trunks
|
ttl_generator.py
|
2
|
24570
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Originally written by Mario and Luigi at Google.
"""A code generator for TPM utility functions.
The generator inputs the Trousers header file "tpm.h" (here renamed
"tss_tpm_h") as well as massaged representation of TPM commands from
the TPM specs, and outputs marshalling/unmarshalling functions and
type declarations for the TPM structures and commands.
"""
import re, sys, os
# Global copyright header.
_COPYRIGHT_HEADER = """\
/* Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
"""
# Header file include guards.
_HEADER_FILE_GUARD_HEADER = """
#ifndef %(name)s
#define %(name)s
"""
_HEADER_FILE_GUARD_FOOTER = """
#endif /* %(name)s */
"""
# The tag name for the following structures does not follow the convention.
_STRUCTURE_TAG_EXCEPTIONS = dict(
TPM_SIGN_INFO="TPM_TAG_SIGNINFO",
TPM_CONTEXT_BLOB="TPM_TAG_CONTEXTBLOB",
TPM_DELEGATE_KEY_BLOB="TPM_TAG_DELG_KEY_BLOB")
# A dictionary of commands that are ignored.
IGNORED_COMMANDS = set(["TPM_FieldUpgrade", "TPM_CertifySelfTest"])
# A set of struct declarations that are ignored.
IGNORED_STRUCTS = set([
"TPM_VERSION_BYTE",
"TPM_NV_DATA_SENSITIVE",
"TPM_KEY_HANDLE_LIST"])
# Bytecodes
BC_INT8 = "BC_INT8"
BC_INT16 = "BC_INT16"
BC_INT32 = "BC_INT32"
BC_REF = "BC_REF"
BC_ARRAY = "BC_ARRAY"
BC_RECORD = "BC_RECORD"
BC_FIELD_KIND_NORMAL = "BC_FIELD_KIND_NORMAL"
BC_FIELD_KIND_VARIABLE = "BC_FIELD_KIND_VARIABLE"
# This variable keeps track of bytecode positions
BYTECODE_OFFSET = 0
# Structures whose bytecode offset is required in hand-written C code.
MANUALLY_MARSHALLED_STRUCTS = set([
"TPM_NONCE",
"TPM_NV_DATA_PUBLIC",
"TPM_PUBKEY",
"TPM_RSA_KEY_PARMS",
])
# Variable-length integers (varints) are encoded as 7-bit digits, most
# significant first (big endian, for readability). Each digit is stored in a
# byte. The most significant bit is 1 when there are more digits, 0 otherwise.
# For instance:
#
# 4 -> 0x04
# 257 -> 0x82, 0x01
#
# We can use varints only for known integer values, for instance bytecode
# offsets. A bunch of values are only known at C compilation time.
def IntToByteCode(x):
return IntToByteCode1(x / 128) + [x % 128]
def IntToByteCode1(x):
if x == 0:
return []
else:
return IntToByteCode1(x / 128) + [x % 128 + 128]
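# Editor-added sanity check (not part of the original generator): a hedged
# verification of the varint examples documented in the comment above.
def CheckVarIntExamples():
  assert IntToByteCode(4) == [0x04]
  assert IntToByteCode(257) == [0x82, 0x01]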
def OutputVarInt(x, file):
global BYTECODE_OFFSET
file.write("/* (%04d) varint: %d */" % (BYTECODE_OFFSET, x))
bytes = IntToByteCode(x)
file.write("".join(map(lambda x: " %d," % x, bytes)))
BYTECODE_OFFSET += len(bytes)
file.write("\n")
def OutputByte(byte, file):
global BYTECODE_OFFSET
file.write("/* (%04d) */ %s,\n" % (BYTECODE_OFFSET, str(byte)))
BYTECODE_OFFSET += 1
def OutputTwoBytes(x, file):
global BYTECODE_OFFSET
file.write("/* (%04d) */ TWO_BYTES_INT(%s),\n" % (BYTECODE_OFFSET, str(x)))
BYTECODE_OFFSET += 2
def OutputOffsetOf(field_name, record_name, file):
global BYTECODE_OFFSET
file.write("/* (%04d) */ OFFSETOF_TWO_BYTES(%s, %s),\n" %
(BYTECODE_OFFSET, field_name, record_name))
BYTECODE_OFFSET += 2
# We parse a C header file (MIDL, actually) and produce descriptors for each
# type defined by the header file. Then we use the descriptors to output
# useful code.
#
# (Before we go any further: confusion may arise in the uninitiated from the
# use of Python objects to describe C types. The Python objects have types
# themselves. To reduce the confusion we try to call them "classes" and
# "subclasses" since, luckily, that's what they are. We reserve the words
# "struct", "record", and "type" for the C types.)
#
# Every named type has a descriptor. Each kind of type (struct, typedef, etc)
# has an associated class, which is a subclass of TypeDesc. Other classes are
# used internally to type descriptors, as described below.
#
# There are four kinds of types: built-in types, types defined by typedef,
# types defined by a struct declaration, and pointers.
class TypeDesc(object):
"""Generic type desriptor."""
def __init__(self):
self.bytecode_offset = -1
def OutputByteCodeOffset(self, file):
OutputVarInt(self.bytecode_offset, file)
def OutputByteCodeRef(self, file):
assert self.bytecode_offset >= 0
OutputByte(BC_REF, file)
self.OutputByteCodeOffset(file)
class NamedTypeDesc(TypeDesc):
"""Desriptor for named types."""
def __init__(self, name):
super(NamedTypeDesc, self).__init__()
self.name = name
def Format(self):
return self.name
class BuiltInTypeDesc(NamedTypeDesc):
"""Desriptor for built-in types."""
def __init__(self, name, bytecode):
super(BuiltInTypeDesc, self).__init__(name)
self.bytecode = bytecode
def BaseType(self):
return self
def OutputByteCodeRef(self, file):
OutputByte(self.bytecode, file)
class TypedefDesc(NamedTypeDesc):
"""Types defined by another type (i.e. aliases)."""
def __init__(self, name, definition):
super(TypedefDesc, self).__init__(name)
self.definition = definition
def BaseType(self):
return self.definition.BaseType()
def OutputDeclarations(self, out_file):
# Type declaration only
out_file.write("typedef %s %s;\n" % (self.definition.name, self.name))
def OutputDefinitions(self, out_file):
pass
def OutputByteCode(self, out_file):
pass
def OutputByteCodeRef(self, out_file):
self.definition.OutputByteCodeRef(out_file)
class RecordDesc(NamedTypeDesc):
"""Descriptor for structs (also typedefs of structs, for simplicity)."""
def BaseType(self):
return self
def OutputByteCode(self, out_file):
if self.fields:
bytecode_offset = BYTECODE_OFFSET
out_file.write("/* Record: %s */\n" % self.name)
OutputByte(BC_RECORD, out_file)
OutputByte(len(self.fields), out_file)
for field in self.fields:
field.OutputByteCode(out_file)
self.bytecode_offset = bytecode_offset
def OutputDeclarations(self, out_file):
if self.fields:
self.OutputTypeDeclaration(out_file)
def OutputTypeDeclaration(self, out_file):
out_file.write("\ntypedef struct %s {\n" % self.name)
for field in self.fields:
field.OutputFieldDeclaration(out_file)
out_file.write("} %s;\n\n" % self.name)
def TagName(self):
if self.name in _STRUCTURE_TAG_EXCEPTIONS:
return _STRUCTURE_TAG_EXCEPTIONS[self.name]
else:
return "TPM_TAG_" + self.name[4:]
class FieldDesc(object):
"""A RecordDesc has a list of fields.
Each field is described by either a FieldDesc, a VarFieldDesc (for
variable-length fields), or an ImplicitVarFieldDesc (special case of
variable-length field where the length is defined implicitly by a
payload type).
"""
def __init__(self, record, index):
# RECORD is the containing record descriptor, used to emit code that lets
# the C compiler compute field offsets. INDEX is the position of the field
# in the record, used to find the size field for variable-length fields.
self.record = record
self.index = index
self.size_field = None
def OutputByteCode(self, out_file):
out_file.write("/* Field: %s */\n" % self.name)
OutputByte(BC_FIELD_KIND_NORMAL, out_file)
OutputOffsetOf(self.name, self.record.name, out_file)
self.type.OutputByteCodeRef(out_file)
def OutputFieldDeclaration(self, out_file):
out_file.write(" %s %s;\n" % (self.type.name, self.name))
class VarFieldDesc(FieldDesc):
"""Descriptor for variable-length fields."""
def OutputByteCode(self, out_file):
out_file.write("/* Variable-length field: %s */\n" % self.name)
OutputByte(BC_FIELD_KIND_VARIABLE, out_file)
OutputOffsetOf(self.name, self.record.name, out_file)
OutputByte(self.size_field.index, out_file)
self.type.OutputByteCodeRef(out_file)
def OutputFieldDeclaration(self, out_file):
out_file.write(" %s* %s;\n" % (self.type.name, self.name))
class ImplicitVarFieldDesc(VarFieldDesc):
"""Descriptor for implicit variable-length fields."""
pass
class ArrayFieldDesc(FieldDesc):
"""Descriptor for fixed-length array (e.g. TPM_SaveContext, TPM_NONCE)."""
def OutputFieldDeclaration(self, out_file):
out_file.write(" %s %s[%s];\n" % (self.element_type.name,
self.name,
self.length))
def OutputByteCode(self, out_file):
out_file.write("/* Array field: %s */\n" % self.name)
OutputByte(BC_ARRAY, out_file)
OutputTwoBytes(self.length, out_file)
self.element_type.OutputByteCodeRef(out_file)
class ArgDesc(object):
"""Descriptor for formal argument of a function."""
def __init__(self, argtype, name):
self.type = argtype
self.name = name
class PointerDesc(TypeDesc):
"""Pointer type (used in argument lists)."""
def __init__(self, base_type):
super(PointerDesc, self).__init__()
self.base_type = base_type
def Format(self):
return self.base_type.Format() + "*"
# The symbol table, i.e. a dictionary mapping type names to type descriptors.
# It is initialized here with the predefined types.
TYPES_DICT = dict(
BYTE=BuiltInTypeDesc("uint8_t", "BC_INT8"),
TSS_BOOL=BuiltInTypeDesc("uint8_t", "BC_INT8"),
BOOL=BuiltInTypeDesc("uint8_t", "BC_INT8"),
UINT16=BuiltInTypeDesc("uint16_t", "BC_INT16"),
UINT32=BuiltInTypeDesc("uint32_t", "BC_INT32"),
UINT64=BuiltInTypeDesc("uint64_t", "BC_INT64"),
APSession=BuiltInTypeDesc("APSession", "BC_APSESSION"),
TPM_RESULT=BuiltInTypeDesc("TPM_RESULT", "BC_INT32"),
int=BuiltInTypeDesc("int", "BC_INT32"),
)
class StructureParser(object):
"""Type structure parser.
"Doing It Right (TM)" would be overkill here. To parse the header file
"the right way" we would need a full C parser (MIDL, actually). So instead
we make heavy assumptions on the file format and the types we need to deal
with. Since the input is quite stable (no changes are expected), this
works well enough.
"""
# compile regular expressions
_STRUCT_RE = re.compile("^typedef\s+struct\s+td(\w+)")
_ENDSTRUCT_RE = re.compile("^}")
_TYPEDEF_RE = re.compile("^typedef\s+(\w+)\s+(\w+)")
# "type name" or "type name[...]" or "type *name"
_FIELD_RE = re.compile("^\s+(\w+(?:\s*[*])?)\s*(\w+)((?:[[].*[]])?)")
_SIZEIS_RE = re.compile("^\s+SIZEIS.(\w+)")
def Parse(self, filename):
"""Parses the TPM header file to extract structure information.
Args:
filename: Name of the TPM header file
Returns:
List of extracted type descriptors.
"""
types = []
header_file = open(filename)
for line in header_file:
match = self._STRUCT_RE.search(line)
if match:
name = match.group(1)
if name in IGNORED_STRUCTS:
continue
desc = RecordDesc(name)
TYPES_DICT[name] = desc
self.ParseRecord(header_file, desc)
types.append(desc)
continue
match = self._TYPEDEF_RE.search(line)
if match:
old_name = match.group(1)
new_name = match.group(2)
old_desc = TYPES_DICT[old_name]
desc = TypedefDesc(new_name, old_desc)
TYPES_DICT[new_name] = desc
types.append(desc)
continue
return types
def ParseRecord(self, in_file, record_desc):
"""Parses the body of a TPM struct declaration (all but the first line)."""
fields_list = []
i = 0
size_field_name = None
line = in_file.next() # skip open brace
while True:
line = in_file.next()
match = self._SIZEIS_RE.search(line)
if match:
size_field_name = match.group(1)
continue
match = self._FIELD_RE.search(line)
if match:
type_name = match.group(1) + match.group(3)
field_name = match.group(2)
field_desc = FieldDesc(record_desc, i)
field_desc.name = field_name
field_desc.type = type_name
if size_field_name:
size_field_index = next((f for f in xrange(len(fields_list))
if fields_list[f].name == size_field_name))
field_desc.size_field_index = size_field_index
size_field_name = None
fields_list.append(field_desc)
i = i + 1
continue
match = self._ENDSTRUCT_RE.search(line)
if match:
record_desc.fields = fields_list
return
class StructureGenerator(object):
"""TPM structure types and marshaling code generator."""
def Generate(self, types, filename_h, filename_c):
"""Generates the .c and .h file for the given types."""
# Declarations (.h file)
h = open(filename_h, "w")
h.write(_COPYRIGHT_HEADER)
guard_name = "TRUNKS_%s_" % filename_h.upper().replace(".", "_")
h.write(_HEADER_FILE_GUARD_HEADER % {"name": guard_name})
h.write("""
#include <stdint.h>
#include <string.h>
#include "trunks_tpm.h"
""")
for t in types:
t.OutputDeclarations(h)
h.write(_HEADER_FILE_GUARD_FOOTER % {"name": guard_name})
h.close()
# Bytecodes (.c file)
c = open(filename_c, "w")
c.write(_COPYRIGHT_HEADER)
c.write("""
#include <stdint.h>
#include <string.h>
#include "%s"
#include "trunks_internal.h"
#include "g_tpm_commands.h"
uint8_t StructureByteCodes[] = {
""" % filename_h)
for t in types:
t.OutputByteCode(c)
c.close()
class Command(object):
"""Descriptor for a TPM command."""
def __init__(self, name):
self.name = name
self.has_auth1 = False
self.has_auth2 = False
self.has_ins = False
self.has_outs = False
def OutputDeclarations(self, out_file):
self.request.OutputDeclarations(out_file)
self.response.OutputDeclarations(out_file)
out_file.write("\n")
self.OutputFunctionHeader(out_file, ";\n")
def OutputByteCode(self, out_file):
self.request.OutputByteCode(out_file)
self.response.OutputByteCode(out_file)
def OutputDefinitions(self, out_file):
self.OutputFunctionHeader(out_file, " {")
self.OutputFunctionBody(out_file)
def OutputFunctionHeader(self, out_file, suffix):
"""Outputs the function header for this command."""
out_file.write("""\
/* Sends a %s command to the TPM and reads the response. */
uint32_t Ttl_%s(%s)%s""" % (self.name, self.name, self.ArgList(), suffix))
def ArgList(self):
if self.args:
arg_list = map(lambda a: "%s %s" % (a.type.Format(), a.name), self.args)
return ", ".join(arg_list)
else:
return "void"
def OutputFunctionBody(self, out_file):
"""Outputs the function body for this command."""
body_template = """
%(initializers)s RETURN_ON_FAILURE(TtlRunCommand(%(ordinal)s,
%(rqu_bytecode)s, %(rsp_bytecode)s, in_parameters, out_parameters,
auth1, auth2, buffer, buffer_size));
return TPM_SUCCESS;
}
"""
initializers = ""
if not self.has_ins:
initializers += " void* in_parameters = NULL;\n"
if not self.has_outs:
initializers += " void* out_parameters = NULL;\n"
initializers += " uint8_t buffer[TPM_MAX_COMMAND_LENGTH];\n"
initializers += " int buffer_size = sizeof(buffer);\n"
if not self.has_auth1:
initializers += " void* auth1 = NULL;\n"
if not self.has_auth2:
initializers += " void* auth2 = NULL;\n"
# write function body
out_file.write(body_template % {
"initializers": initializers,
"ordinal": self.ordinal,
"rqu_bytecode": self.request.bytecode_offset,
"rsp_bytecode": self.response.bytecode_offset,
})
class CommandParser(object):
"""Command definition parser.
The text file is extracted from the PDF file containing the TPM
command specification from the Trusted Computing Group. The syntax
of the text file is ad-hoc.
"""
_LINE_SKIP_RE = re.compile("^(\s*$)|(^_COMMENT)")
_TYPE_RE = "(\w+(?:\s*[[].*[]])?)"
_COMMAND_RE = re.compile("^_TPM_COMMAND\s+(\w+)$")
_IN_PARAM_RE = re.compile("^_IN_PARAM\s+%s\s+(\w+)\s+(.*)$" % _TYPE_RE)
_OUT_PARAM_RE = re.compile("^_OUT_PARAM\s+%s\s+(\w+)\s+(.*)$" % _TYPE_RE)
# One line of lookahead
_line = None
def NextLine(self, in_file):
try:
while True:
self._line = in_file.next()
if not self._LINE_SKIP_RE.search(self._line):
break
except StopIteration:
self._line = None
def Parse(self, filename):
"""Parses a text version of the TPM command specification.
Args:
filename: Name of the TPM command specification file.
Returns:
List of extracted command descriptors.
"""
commands = []
f = open(filename)
self.NextLine(f)
while True:
cmd = self.ParseCommand(f)
if not cmd:
return commands
if cmd.name not in IGNORED_COMMANDS:
commands.append(cmd)
def ParseCommand(self, f):
"""Parses a TPM command structure."""
if not self._line:
return None
match = self._COMMAND_RE.search(self._line)
if not match:
print "cannot match command from line: %s\n" % self._line
name = match.group(1)
cmd = Command(name)
self.NextLine(f)
cmd.request = self.ParseCommandRR(f, self._IN_PARAM_RE, name + "_rqu")
cmd.response = self.ParseCommandRR(f, self._OUT_PARAM_RE, name + "_rsp")
assert ((cmd.request.fields and cmd.response.fields) or
cmd.name in IGNORED_COMMANDS)
return cmd
def ParseCommandRR(self, f, regexp, name):
"""Parses a request or response structure."""
fields = []
i = 0
record = RecordDesc(name)
while self._line:
match = regexp.search(self._line)
if not match:
break
field = FieldDesc(record, i)
field.name = match.group(2)
# For now assign the type name, not the descriptor, and resolve later.
# The type resolution also includes transforming a FIELD into a VARFIELD
# or ARRAYFIELD when applicable.
field.type = match.group(1)
field.description = match.group(3)
field.size_field_name = None
fields.append(field)
i = i + 1
self.NextLine(f)
record.fields = fields
return record
class Rewriter(object):
"""TPM type and command rewriter."""
_POINTER_RE = re.compile("^(\w+)\s*[*]$")
_ARRAY_RE = re.compile("^(\w+)\s*[[]\s*[]]$")
_FIXARRAY_RE = re.compile("^(\w+)\s*[[]\s*(\w+)\s*[]]$")
_TAG_RE = re.compile("^\s*(\w+)\s*$")
_ORD_RE = re.compile("^.*((TPM|TSC)_ORD_\w+).*$")
def Rewrite(self, commands):
for command in commands:
self.FixTypes(command.request)
self.FixTypes(command.response)
self.ExtractConstants(command)
self.FixRequestHeaderAndTrailer(command)
self.FixResponseHeaderAndTrailer(command)
self.ComputeCommandArgs(command)
def FixTypes(self, record):
"""Fixes the given command structures.
Args:
record: structure to be fixed.
The following modifications are applied:
1. Replace type names in fields with their type descriptors
2. Change Fields into VarFields as applicable.
3. Change Fields into ArrayFields as applicable.
"""
if not isinstance(record, RecordDesc):
return
new_fields = []
previous_old_field = None
previous_new_field = None
for field in record.fields:
match = Rewriter._POINTER_RE.match(field.type)
if not match:
match = Rewriter._ARRAY_RE.match(field.type)
if match:
new_field = VarFieldDesc(record, field.index)
new_field.name = field.name
new_field.type = TYPES_DICT[match.group(1)]
new_field.size_field = previous_new_field
else:
match = Rewriter._FIXARRAY_RE.match(field.type)
if match:
new_field = ArrayFieldDesc(record, field.index)
element_type = TYPES_DICT[match.group(1)]
new_field.element_type = element_type
new_field.name = field.name
new_field.length = match.group(2)
else:
new_field = field # recycle
new_field.type = TYPES_DICT[field.type]
new_fields.append(new_field)
previous_old_field = field
previous_new_field = new_field
record.fields = new_fields
def ExtractConstants(self, cmd):
"""Extracts the command tag and ordinal."""
match = Rewriter._TAG_RE.search(cmd.request.fields[0].description)
if match:
cmd.tag = match.group(1)
match = Rewriter._ORD_RE.search(cmd.request.fields[2].description)
if match:
cmd.ordinal = match.group(1)
if not cmd.tag or not cmd.ordinal:
print "Could not extract tag or ordinal for command %s" % cmd.name
def FixFields(self, fields, len):
fields = fields[3:len]
for field in fields:
field.index -= 3
return fields
def FixRequestHeaderAndTrailer(self, cmd):
"""Fixes the request header and trailer according to the command type."""
req_params_len = len(cmd.request.fields)
if cmd.tag == "TPM_TAG_RQU_AUTH2_COMMAND":
req_params_len -= 10
cmd.has_auth1 = True
cmd.has_auth2 = True
elif cmd.tag == "TPM_TAG_RQU_AUTH1_COMMAND":
req_params_len -= 5
cmd.has_auth1 = True
# remove first three fields
cmd.request.fields = self.FixFields(cmd.request.fields, req_params_len)
cmd.has_ins = len(cmd.request.fields) > 0
def FixResponseHeaderAndTrailer(self, cmd):
"""Fixes the response header and trailer according to the command type."""
rsp_params_len = len(cmd.response.fields)
if cmd.tag == "TPM_TAG_RQU_AUTH2_COMMAND":
rsp_params_len -= 6
elif cmd.tag == "TPM_TAG_RQU_AUTH1_COMMAND":
rsp_params_len -= 3
cmd.response.fields = self.FixFields(cmd.response.fields, rsp_params_len)
cmd.has_outs = len(cmd.response.fields) > 0
def ComputeCommandArgs(self, cmd):
"""Generates the argument list for the given command."""
cmd.args = []
if cmd.has_ins:
cmd.args.append(ArgDesc(PointerDesc(cmd.request), "in_parameters"))
if cmd.has_outs:
cmd.args.append(ArgDesc(PointerDesc(cmd.response), "out_parameters"))
cmd.args.append(ArgDesc(PointerDesc(TYPES_DICT["BYTE"]), "buffer"))
cmd.args.append(ArgDesc(TYPES_DICT["int"], "buffer_size"))
if cmd.has_auth1:
cmd.args.append(ArgDesc(PointerDesc(TYPES_DICT["APSession"]), "auth1"))
if cmd.has_auth2:
cmd.args.append(ArgDesc(PointerDesc(TYPES_DICT["APSession"]), "auth2"))
class CommandGenerator(object):
"""TPM command types and marshaling code generator."""
def Generate(self, commands, filename_h, filename_b, filename_c):
"""Generates the .c and .h file for the given commands."""
h = open(filename_h, "w")
h.write(_COPYRIGHT_HEADER)
guard_name = "TRUNKS_%s_" % filename_h.upper().replace(".", "_")
h.write(_HEADER_FILE_GUARD_HEADER % {"name": guard_name})
h.write("""
#include "g_tpm_structures.h"
#include "trunks_internal.h"
""")
b = open(filename_b, "a")
b.write("\n\n/* Command Structures (request and response) */\n\n")
c = open(filename_c, "w")
c.write(_COPYRIGHT_HEADER)
c.write("""
#include "%s"
""" % filename_h)
# Output addresses of bytecodes for some struct types.
for name in MANUALLY_MARSHALLED_STRUCTS:
struct = TYPES_DICT[name]
h.write("#define TTL_%s_BYTECODE_OFFSET %d\n" %
(name, struct.bytecode_offset))
h.write("\n")
# Output commands.
for command in commands:
command.OutputDeclarations(h)
command.OutputByteCode(b)
command.OutputDefinitions(c)
h.write(_HEADER_FILE_GUARD_FOOTER % {"name": guard_name})
h.close()
b.write("};\n")
b.close()
c.close()
def Run():
tpm_structures = StructureParser().Parse("tss_tpm_h")
for structure in tpm_structures:
Rewriter().FixTypes(structure)
StructureGenerator().Generate(
tpm_structures, "g_tpm_structures.h", "g_tpm_structures.c")
tpm_commands = CommandParser().Parse("g_tpm_commands_structure.txt")
Rewriter().Rewrite(tpm_commands)
CommandGenerator().Generate(tpm_commands, "g_tpm_commands.h",
"g_tpm_structures.c", "g_tpm_commands.c")
def Test():
print "no tests yet"
exit(1)
# main program
if __name__ == "__main__":
if len(sys.argv) == 1:
Run()
elif len(sys.argv) == 2 and sys.argv[1] == "test":
Test()
else:
sys.stderr.write("usage: %s [test]\n" % sys.argv[0])
exit(1)
|
bsd-3-clause
| 1,337,511,305,563,541,800
| 29.446097
| 79
| 0.649206
| false
| 3.354266
| false
| false
| false
|
vtemian/git-to-trello
|
github/hook.py
|
1
|
1448
|
import json
from flask import Blueprint, render_template, request
import requests
import config
from tracker.API import TrackerAPI
hook = Blueprint('hooks', __name__, 'templates')
@hook.route('/github/hook')
def home():
return render_template('new.html')
@hook.route('/github/hook/new', methods=['POST'])
def new():
data = {
"name": request.form['name'],
"active": request.form['active'] if 'active' in request.form else 'false',
"events": request.form['events'].split(','),
"config": {
"url": request.form['url'],
"content_type": request.form['content_type']
}
}
auth_url = "?access_token=%s" % config.GITHUB_TOKEN
url = "https://api.github.com/repos/%s/%s/hooks" % (request.form['user'],
request.form['repo'])
response = requests.post("%s%s" % (url, auth_url), data=json.dumps(data))
return render_template('response.html', response=response.content)
@hook.route('/github/hook/push', methods=['POST'])
def push():
tracker = TrackerAPI(config.TRELLO_KEY, config.TRELLO_TOKEN)
data = json.loads(request.data)
state_router = {
'success': 'green',
'pending': 'yellow',
'failure': 'red',
'error' : 'red',
}
state_details = {
'card': data['pull_request']['body'],
'state': state_router[data['state']]
}
tracker.change_state(**state_details)
return render_template('response.html', response='done')
|
mit
| 440,589,313,761,384,800
| 25.814815
| 78
| 0.620166
| false
| 3.43128
| false
| false
| false
|
eberle1080/tesserae-ng
|
website/tesserae_ng/forms.py
|
1
|
2640
|
from django import forms
import logging
from website.tesserae_ng.models import SourceTextVolume
logger = logging.getLogger(__name__)
class SourceTextSubmitForm(forms.Form):
LANGUAGE_CHOICES = (
('latin', 'Latin'),
('greek', 'Greek'),
('english', 'English')
)
def _boundText(auto_source, auto_query, auto_value, input_value, source_value):
"""
Example:
_boundText('myPeople', 'getPeople', 'mySelectedGuid', 'name', 'guid')
"""
bind_text = "jqAuto: { autoFocus: true }, jqAutoSource: " + auto_source + ", jqAutoQuery: " + \
auto_query + ", jqAutoValue: " + auto_value + ", jqAutoSourceLabel: 'displayName', " + \
"jqAutoSourceInputValue: '" + input_value + "', jqAutoSourceValue: '" + source_value + "'"
return forms.TextInput(attrs={'data-bind':bind_text})
enabled = forms.BooleanField(label='Indexed', required=True, initial=True)
language = forms.ChoiceField(label='Text language', choices=LANGUAGE_CHOICES, required=True)
author = forms.CharField(label='Work author', max_length=255, required=True,
widget=_boundText('authors', 'getAuthors', 'selectedAuthor', 'name', 'name'))
title = forms.CharField(label='Work name', max_length=255, required=True,
widget=_boundText('titles', 'getTitles', 'selectedTitle', 'title', 'title'))
volume = forms.CharField(label='Volume name', max_length=255, required=False)
online_source_name = forms.CharField(label='Online source name', max_length=255, required=False)
online_source_link = forms.URLField(label='Online source URL', required=False)
print_source_name = forms.CharField(label='Print source name', max_length=255, required=False)
print_source_link = forms.URLField(label='Print source URL', required=False)
source_file = forms.FileField(allow_empty_file=False, required=True, label='Source file')
class STVChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return obj.source.title + " (" + obj.volume + ")"
class SimpleSearchForm(forms.Form):
source = STVChoiceField(queryset=SourceTextVolume.objects, empty_label="Choose a source text")
target = STVChoiceField(queryset=SourceTextVolume.objects, empty_label="Choose a target text")
start = forms.IntegerField(initial=0, min_value=0, widget=forms.widgets.HiddenInput())
rows = forms.IntegerField(initial=50, min_value=1, widget=forms.widgets.HiddenInput())
sw = forms.CharField(min_length=0, max_length=10000, required=False, widget=forms.widgets.HiddenInput())
|
bsd-2-clause
| 3,137,745,687,063,789,600
| 46.142857
| 108
| 0.675758
| false
| 3.760684
| false
| false
| false
|
ebroder/anygit
|
anygit/client/git_parser.py
|
1
|
5600
|
#!/usr/bin/python
import os
import StringIO
import subprocess
import sys
import tempfile
from dulwich import pack
DIR = os.path.abspath(os.path.dirname(__file__))
UNPACK_DIR = os.path.join(DIR, '../../tmp/unpack')
GIT_CMD = os.path.join(DIR, '../../pkgs/git/git')
types = {'t' : 'tree', 'b' : 'blob', 'c' : 'commit', 'a' : 'tag'}
class Finished(Exception):
pass
class ObjectsIterator(object):
def __init__(self, data, is_path, unpack):
if not is_path:
assert not unpack
self.data = data
self.is_path = is_path
self.unpack = unpack
if not unpack:
if is_path:
pack_data = pack.PackData.from_path(data)
else:
file = StringIO.StringIO(data)
length = len(data)
pack_data = pack.PackData.from_file(file, length)
self.uncompressed_pack = pack.Pack.from_objects(pack_data, None)
def iterobjects(self):
if self.unpack:
# Initialize a new repo and unpack into there. Should use our
# patched unpacker, which prints out parseable data. For best
# performance, make UNPACK_DIR be on a tmpfs.
assert self.is_path
unpack_dir = tempfile.mkdtemp(prefix='unpack_', suffix='.git', dir=UNPACK_DIR)
subprocess.check_call([GIT_CMD, 'init', '--bare', unpack_dir])
p = subprocess.Popen([GIT_CMD, 'unpack-objects'],
cwd=unpack_dir,
stdin=file(self.data),
stdout=subprocess.PIPE)
return parse(p.stdout)
else:
return (wrap_dulwich_object(obj) for obj in self.uncompressed_pack.iterobjects())
def wrap_dulwich_object(obj):
try:
type = obj._type
except AttributeError:
# Is new style, just return
return obj
else:
if type == 'tree':
return Tree(obj.id, obj.iteritems())
elif type == 'tag':
# Name used to be get_object, now is a property object.
return Tag(obj.id, obj.get_object()[1])
elif type == 'commit':
return Commit(obj.id, obj.tree, obj.parents)
else:
assert type == 'blob'
return Blob(obj.id)
class GitObject(object):
"""A git object, copying the interface of dulwich objects."""
def __init__(self, id):
self.id = id
def __str__(self):
return '%s: %s' % (type(self).__name__, self.id)
class Tree(GitObject):
type_name = 'tree'
def __init__(self, id, children):
super(Tree, self).__init__(id)
self.children = children
def iteritems(self):
return iter(self.children)
class Tag(GitObject):
type_name = 'tag'
def __init__(self, id, child_sha1):
super(Tag, self).__init__(id)
self.child_sha1 = child_sha1
@property
def object(self):
return (None, self.child_sha1)
class Commit(GitObject):
type_name = 'commit'
def __init__(self, id, tree, parents):
super(Commit, self).__init__(id)
self.tree = tree
self.parents = parents
class Blob(GitObject):
type_name = 'blob'
def get_next_len(f):
t = f.read(1)
if not t:
raise Finished
type = types[t]
space = f.read(1)
assert space == ' '
accum = 0
while True:
n = f.read(1)
if n != '\0':
accum = int(n) + 10 * accum
else:
break
return type, accum
def null_split(s):
for i, char in enumerate(s):
if char == '\0':
return s[:i], s[i+1:]
else:
raise ValueError('No null byte found in %s' % s)
def grab_sha1(s, encoded=False):
if encoded:
return s[:40], s[40:]
else:
return s[:20].encode('hex'), s[20:]
def grab_mode(s):
return s[:5], s[6:]
def parse(f):
try:
while True:
type, accum = get_next_len(f)
sha1 = f.read(20).encode('hex')
if type == 'tree':
data = f.read(accum)
children = []
while data:
mode, data = grab_mode(data)
filename, data = null_split(data)
child_sha1, data = grab_sha1(data, encoded=False)
children.append((filename, mode, child_sha1))
yield Tree(sha1, children)
elif type == 'tag':
data = f.read(accum)
assert data[:7] == 'object '
child_sha1, _ = grab_sha1(data[7:], encoded=True)
yield Tag(sha1, child_sha1)
elif type == 'commit':
tree = None
parents = []
data = f.read(accum)
while data[:6] != 'author':
if data[:5] == 'tree ':
assert tree is None
tree, data = grab_sha1(data[5:], encoded=True)
else:
assert data[:7] == 'parent '
child_sha1, data = grab_sha1(data[7:], encoded=True)
parents.append(child_sha1)
# Slurp a newline
assert data[0] == '\n'
data = data[1:]
yield Commit(sha1, tree, parents)
else:
assert type == 'blob'
yield Blob(sha1)
except Finished:
print 'Completed'
if __name__ == '__main__':
    for obj in parse(sys.stdin):
        print obj
|
mit
| -6,383,588,600,727,762,000
| 28.62963
| 93
| 0.503036
| false
| 3.786342
| false
| false
| false
|
altai/focus2
|
focus2/utils/jinja.py
|
1
|
3386
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Focus2
# Copyright (C) 2012 Grid Dynamics Consulting Services, Inc
# All Rights Reserved
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import datetime
from jinja2 import filters
# correct implementation for Jinja2 buggy function
# github.com/mitsuhiko/jinja2/commit/95b1d600780166713acfe05b18266e5e83dfa9a9
def do_filesizeformat(value, binary=True, exactly=False):
"""Format the value like a 'human-readable' file size (i.e. 13 kB,
4.1 MB, 102 Bytes, etc). Per default decimal prefixes are used (Mega,
Giga, etc.), if the second parameter is set to `True` the binary
prefixes are used (Mebi, Gibi).
"""
bytes = float(value)
base = binary and 1024 or 1000
prefixes = [
(binary and "KiB" or "kB"),
(binary and "MiB" or "MB"),
(binary and "GiB" or "GB"),
(binary and "TiB" or "TB"),
(binary and "PiB" or "PB"),
(binary and "EiB" or "EB"),
(binary and "ZiB" or "ZB"),
(binary and "YiB" or "YB")
]
if bytes == 1:
return "1 Byte"
elif bytes < base:
return "%d Bytes" % bytes
else:
for i, prefix in enumerate(prefixes):
unit = base ** (i + 1)
if bytes < unit * base:
break
return "%.1f %s%s" % (
(bytes / unit), prefix,
(" (%d Bytes)" % bytes if exactly else ""))
def str_to_datetime(dtstr):
"""
Convert string to datetime.datetime. String should be in ISO 8601 format.
The function raises ``ValueError`` for invalid date string.
"""
if not dtstr:
return None
if dtstr.endswith("Z"):
dtstr = dtstr[:-1]
for fmt in ("%Y-%m-%dT%H:%M:%S",
"%Y-%m-%dT%H:%M:%S.%f",
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %H:%M:%S.%f"):
try:
return datetime.datetime.strptime(dtstr, fmt)
except ValueError:
pass
raise ValueError("Not ISO 8601 format date: %s" % dtstr)
def do_datetimeformat(value, format, default=""):
return str_to_datetime(value).strftime(format) if value else default
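# A minimal usage sketch (illustrative values, assuming ISO 8601 input as above):
#   str_to_datetime("2013-01-20T17:01:11Z")              -> datetime.datetime(2013, 1, 20, 17, 1, 11)
#   do_datetimeformat("2013-01-20T17:01:11", "%d %b %Y")  -> "20 Jan 2013"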
def do_diskformat(value):
fmt = {
"aki": "Amazon kernel image",
"ari": "Amazon ramdisk image",
"ami": "Amazon machine image",
}
return fmt.get(value, value)
def do_costformat(value):
return "%.2f" % float(value)
def image_spawnable(image):
return image["container-format"] not in ("ari", "aki")
def setup_env(env):
env.filters["filesizeformat"] = do_filesizeformat
env.filters["datetimeformat"] = do_datetimeformat
env.filters["diskformat"] = do_diskformat
env.filters["costformat"] = do_costformat
env.tests["image_spawnable"] = image_spawnable
|
lgpl-2.1
| 7,323,760,017,044,286,000
| 30.943396
| 77
| 0.623449
| false
| 3.556723
| false
| false
| false
|
rishubhjain/commons
|
tendrl/commons/event.py
|
1
|
2085
|
import struct
from gevent import socket
from gevent.socket import error as socket_error
from gevent.socket import timeout as socket_timeout
import sys
from tendrl.commons.message import Message
from tendrl.commons.logger import Logger
import traceback
class Event(object):
def __init__(self, message, socket_path=None):
if message.publisher == "node_agent":
try:
json_str = Message.to_json(message)
message = Message.from_json(json_str)
Logger(message)
except (TypeError, ValueError, KeyError, AttributeError):
sys.stderr.write(
"Unable to log the message.%s\n" % message)
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(
exc_type, exc_value, exc_tb, file=sys.stderr)
else:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket_path = socket_path
if self.socket_path is None:
self.socket_path = NS.config.data['logging_socket_path']
self._write(message)
def _write(self, message):
try:
json_str = Message.to_json(message)
self.sock.connect(self.socket_path)
self._pack_and_send(json_str)
except (socket_error, socket_timeout, TypeError):
msg = Message.to_json(message)
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(
exc_type, exc_value, exc_tb, file=sys.stderr)
sys.stderr.write(
"Unable to pass the message into socket.%s\n" % msg)
finally:
self.sock.close()
def _pack_and_send(self, msg):
frmt = "=%ds" % len(msg)
packedMsg = struct.pack(frmt, msg)
packedHdr = struct.pack('=I', len(packedMsg))
self._send(packedHdr)
self._send(packedMsg)
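    # Wire framing produced above (reference note, not part of the original code):
    # each message goes out as a 4-byte unsigned length header (struct format '=I')
    # followed by the raw JSON payload bytes of that length.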
def _send(self, msg):
sent = 0
while sent < len(msg):
sent += self.sock.send(msg[sent:])
|
lgpl-2.1
| 5,323,745,002,074,403,000
| 34.948276
| 73
| 0.573621
| false
| 3.889925
| false
| false
| false
|
tedunderwood/horizon
|
logistic/reproduce_poetic_prestige.py
|
1
|
8056
|
#!/usr/bin/env python3
# reproduce.py
import csv, os, sys, pickle, math
import versatiletrainer as train
import pandas as pd
# sourcefolder =
# extension =
# metadatapath =
# outputpath = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/predictions.csv'
def genre_gridsearch(modelname, c_range, ftstart, ftend, ftstep, positive_tags = ['elite'], negative_tags = ['vulgar'], excl_below = 1700, excl_above = 2000):
# Function does a gridsearch to identify an optimal number of features and setting of
# the regularization constant; then produces that model.
sourcefolder = '/Users/tunder/Dropbox/GenreProject/python/reception/poetryEF/fromEF/'
extension = '.tsv'
#metadatapath = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/prestigeficmeta.csv'
metadatapath = '/Users/tunder/Dropbox/GenreProject/python/reception/poetryEF/poemeta.csv'
vocabpath = '/Users/tunder/Dropbox/fiction/lexicon/' + modelname + '.txt'
if os.path.exists(vocabpath):
print('Vocabulary for ' + modelname + ' already exists. Using it.')
outputpath = '/Users/tunder/Dropbox/GenreProject/python/reception/poetryEF/' + modelname + '.csv'
    # We can simply exclude volumes from consideration on the basis of any
# metadata category we want, using the dictionaries defined below.
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = excl_below
excludeabove['firstpub'] = excl_above
sizecap = 700
# CLASSIFY CONDITIONS
# print()
# print("You can also specify positive tags to be excluded from training, and/or a pair")
# print("of integer dates outside of which vols should be excluded from training.")
# print("If you add 'donotmatch' to the list of tags, these volumes will not be")
# print("matched with corresponding negative volumes.")
# print()
# ## testphrase = input("Comma-separated list of such tags: ")
testphrase = ''
testconditions = set([x.strip() for x in testphrase.split(',') if len(x) > 0])
datetype = "firstpub"
numfeatures = ftend
regularization = .000075
# linting the code would get rid of regularization, which is at this
# point an unused dummy parameter
paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
modelparams = 'logistic', 12, ftstart, ftend, ftstep, c_range
matrix, rawaccuracy, allvolumes, coefficientuples = train.tune_a_model(paths, exclusions, classifyconditions, modelparams)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
tiltaccuracy = train.diachronic_tilt(allvolumes, 'linear', [])
print("Divided with a line fit to the data trend, it's ", str(tiltaccuracy))
def ocr_gridsearch(modelname, c_range, ftstart, ftend, ftstep, positive_tags = ['elite'], negative_tags = ['vulgar'], excl_below = 1700, excl_above = 2000):
# Function does a gridsearch to identify an optimal number of features and setting of
# the regularization constant; then produces that model.
sourcefolder = '/Users/tunder/Dropbox/python/ocr/ocrtexts/'
extension = '.tsv'
#metadatapath = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/prestigeficmeta.csv'
metadatapath = '/Users/tunder/Dropbox/GenreProject/python/reception/poetryEF/poemeta.csv'
vocabpath = '/Users/tunder/Dropbox/python/ocr/' + modelname + '.txt'
if os.path.exists(vocabpath):
print('Vocabulary for ' + modelname + ' already exists. Using it.')
outputpath = '/Users/tunder/Dropbox/python/ocr/' + modelname + '.csv'
    # We can simply exclude volumes from consideration on the basis of any
# metadata category we want, using the dictionaries defined below.
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = excl_below
excludeabove['firstpub'] = excl_above
sizecap = 700
# CLASSIFY CONDITIONS
# print()
# print("You can also specify positive tags to be excluded from training, and/or a pair")
# print("of integer dates outside of which vols should be excluded from training.")
# print("If you add 'donotmatch' to the list of tags, these volumes will not be")
# print("matched with corresponding negative volumes.")
# print()
# ## testphrase = input("Comma-separated list of such tags: ")
testphrase = ''
testconditions = set([x.strip() for x in testphrase.split(',') if len(x) > 0])
datetype = "firstpub"
numfeatures = ftend
regularization = .000075
# linting the code would get rid of regularization, which is at this
# point an unused dummy parameter
paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
modelparams = 'logistic', 12, ftstart, ftend, ftstep, c_range
matrix, rawaccuracy, allvolumes, coefficientuples = train.tune_a_model(paths, exclusions, classifyconditions, modelparams)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
tiltaccuracy = train.diachronic_tilt(allvolumes, 'linear', [])
print("Divided with a line fit to the data trend, it's ", str(tiltaccuracy))
def applymodel():
modelpath = input('Path to model? ')
sourcefolder = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/fromEF'
extension = '.tsv'
metadatapath = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/prestigeficmeta.csv'
newmetadict = train.apply_pickled_model(modelpath, sourcefolder, extension, metadatapath)
print('Got predictions for that model.')
outpath = '/Users/tunder/Dropbox/GenreProject/python/reception/poetryEF/mergedmeta.csv'
newmetadict.to_csv(outpath)
def comparison(selfmodel, othermodel, modelname):
totalvolumes = 0
right = 0
for v in selfmodel.index:
realgenre = selfmodel.loc[v, 'realclass']
v = str(v)
otherprediction = othermodel.loc[v, modelname]
if realgenre > .5 and otherprediction > 0.5:
right += 1
elif realgenre < .5 and otherprediction < 0.5:
right += 1
totalvolumes +=1
return totalvolumes, right
def getacc(filelist):
allofem = 0
allright = 0
for afile in filelist:
df = pd.read_csv(afile)
totalcount = len(df.realclass)
tp = sum((df.realclass > 0.5) & (df.logistic > 0.5))
tn = sum((df.realclass <= 0.5) & (df.logistic <= 0.5))
fp = sum((df.realclass <= 0.5) & (df.logistic > 0.5))
fn = sum((df.realclass > 0.5) & (df.logistic <= 0.5))
assert totalcount == (tp + fp + tn + fn)
allofem += totalcount
allright += (tp + tn)
return allright / allofem
if __name__ == '__main__':
args = sys.argv
if len(args) < 2:
c_range = [.00009, .0002, .0004, .0008, .0012, .002, .004, .008, .012, 0.3, 0.8, 2]
featurestart = 1500
featureend = 4000
featurestep = 100
genre_gridsearch('poeEF2', c_range, featurestart, featureend, featurestep, positive_tags = ['reviewed'], negative_tags = ['random'], excl_below = 1800, excl_above = 2000)
else:
c_range = [.0002, .0004, .0008, .0012, .002, .004, .008, .012, 0.3]
featurestart = 3800
featureend = 6800
featurestep = 400
ocr_gridsearch('ocrpoe9', c_range, featurestart, featureend, featurestep, positive_tags = ['reviewed'], negative_tags = ['random'], excl_below = 1800, excl_above = 2000)
|
mit
| 2,061,403,125,019,920,400
| 40.740933
| 178
| 0.676514
| false
| 3.585225
| true
| false
| false
|
pbdeuchler/deaddrop
|
deaddrop/settings.py
|
1
|
5647
|
import sys, os
import dj_database_url
# PATH vars
here = lambda *x: os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)
PROJECT_ROOT = here("")
root = lambda *x: os.path.join(os.path.abspath(PROJECT_ROOT), *x)
sys.path.insert(0, root('apps'))
SENDGRID_API_KEY = os.environ["SENDGRID_API_KEY"]
TWILIO_ACCOUNT_SID = os.environ["TWILIO_ACCOUNT_SID"]
TWILIO_AUTH_TOKEN = os.environ["TWILIO_AUTH_TOKEN"]
TWILIO_FROM_NUMBER = os.environ["TWILIO_FROM_NUMBER"]
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ["SECRET_KEY"]
# Read only flag for maintenance
READ_ONLY = False
# SECURITY WARNING: don't run with debug turned on in production!
if os.getenv("ENVIRONMENT", "") == "dev":
DEBUG = True
else:
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = os.getenv('ALLOWED_HOSTS', 'localhost').split(',')
ADMINS = (
('Philip Deuchler', 'pbdeuchler@gmail.com'),
)
MANAGERS = ADMINS
# Application definition
DJANGO_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
CUSTOM_APPS = (
'deaddrop.api',
'deaddrop.web',
)
LIBRARY_APPS = (
'rest_framework',
# 'djoser',
# 'authtools',
'rest_framework_swagger',
# See: http://django-rest-framework.org/api-guide/authentication#tokenauthentication
# 'rest_framework.authtoken',
# 'bootstrap3',
)
INSTALLED_APPS = DJANGO_APPS + CUSTOM_APPS + LIBRARY_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
if os.getenv("ENVIRONMENT", "") == "dev":
MIDDLEWARE_CLASSES += (
# 'deaddrop.web.middleware.LogRequests',
# 'deaddrop.web.middleware.ReadOnly'
)
ROOT_URLCONF = 'deaddrop.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'deaddrop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {'default': dj_database_url.config()}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC' # 'Europe/London'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = root('assets', 'uploads')
MEDIA_URL = '/media/'
# Additional locations of static files
STATICFILES_DIRS = (
root('assets'),
)
STATIC_ROOT = root('deaddrop/staticfiles/')
TEMPLATE_DIRS = (
root('templates'),
)
# AUTH_USER_MODEL = 'authtools.User'
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
# 'file': {
# 'level': 'DEBUG',
# 'class': 'logging.FileHandler',
# 'filename': '/path/to/django/debug.log',
# },
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'console-verbose': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'django.request': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'requests-dev': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'print-debug': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
},
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
# 'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
# ]
}
DJOSER = {
# 'DOMAIN': 'frontend.com',
# 'SITE_NAME': 'Frontend',
'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}',
'ACTIVATION_URL': '#/activate/{uid}/{token}',
'LOGIN_AFTER_ACTIVATION': True,
'SEND_ACTIVATION_EMAIL': True,
}
# test settings
if len(sys.argv) > 1 and 'test' in sys.argv[1]:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/tmp/deaddrop_test.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
|
bsd-3-clause
| 3,880,424,737,240,773,000
| 24.668182
| 95
| 0.618736
| false
| 3.416213
| false
| false
| false
|
acil-bwh/SlicerCIP
|
Scripted/CIP_LesionModel/FeatureExtractionLib/ParenchymalVolume.py
|
1
|
3862
|
from __main__ import vtk, qt, ctk, slicer
import numpy as np
from collections import OrderedDict
class ParenchymalVolume:
def __init__(self, parenchymaLabelmapArray, sphereWithoutTumorLabelmapArray, spacing, keysToAnalyze=None):
""" Parenchymal volume study.
Compare each ones of the different labels in the original labelmap with the volume of the area of interest
:param parenchymaLabelmapArray: original labelmap for the whole volume node
:param sphereWithoutTumorLabelmapArray: labelmap array that contains the sphere to study without the tumor
:param spacing: tuple of volume spacing
:param keysToAnalyze: list of strings with the types of emphysema it's going to be analyzed. When None,
all the types will be analyzed
"""
self.parenchymaLabelmapArray = parenchymaLabelmapArray
self.sphereWithoutTumorLabelmapArray = sphereWithoutTumorLabelmapArray
self.spacing = spacing
self.parenchymalVolumeStatistics = OrderedDict()
self.parenchymalVolumeStatisticsTiming = OrderedDict()
allKeys = list(self.getAllEmphysemaTypes().keys())
if keysToAnalyze is not None:
            self.keysToAnalyze = set(keysToAnalyze).intersection(allKeys)
else:
self.keysToAnalyze = list(self.getAllEmphysemaTypes().keys())
@staticmethod
def getAllEmphysemaTypes():
""" All emphysema types and values
:return: dictionary of Type(string)-[numeric_code, description]
"""
return {
"Emphysema": 5,
"Mild paraseptal emphysema": 10,
"Moderate paraseptal emphysema": 11,
"Severe paraseptal emphysema": 12,
"Mild centrilobular emphysema": 16,
"Moderate centrilobular emphysema": 17,
"Severe centilobular emphysema": 18,
"Mild panlobular emphysema": 19,
"Moderate panlobular emphysema": 20,
"Severe panlobular emphysema": 21
}
@staticmethod
def getAllEmphysemaDescriptions():
return list(ParenchymalVolume.getAllEmphysemaTypes().keys())
def analyzeType(self, code):
print(("DEBUG: analyze code {0}.".format(code)))
# Calculate volume for the studied ROI (tumor)
totalVolume = np.sum(self.parenchymaLabelmapArray == code)
if totalVolume == 0:
return 0
# Calculate total volume in the sphere for this emphysema type
sphereVolume = np.sum(self.parenchymaLabelmapArray[self.sphereWithoutTumorLabelmapArray.astype(np.bool)] == code)
# Result: SV / PV
return float(sphereVolume) / totalVolume
def EvaluateFeatures(self, printTiming = False, checkStopProcessFunction=None):
# Evaluate dictionary elements corresponding to user-selected keys
# Remove all the keys that must not be evaluated
        for key in set(self.parenchymalVolumeStatistics.keys()).difference(self.keysToAnalyze):
self.parenchymalVolumeStatistics[key] = None
types = self.getAllEmphysemaTypes()
if not printTiming:
for key in self.keysToAnalyze:
self.parenchymalVolumeStatistics[key] = self.analyzeType(types[key])
if checkStopProcessFunction is not None:
checkStopProcessFunction()
return self.parenchymalVolumeStatistics
else:
import time
t1 = time.time()
for key in self.keysToAnalyze:
self.parenchymalVolumeStatistics[key] = self.analyzeType(types[key])
self.parenchymalVolumeStatisticsTiming[key] = time.time() - t1
if checkStopProcessFunction is not None:
checkStopProcessFunction()
return self.parenchymalVolumeStatistics, self.parenchymalVolumeStatisticsTiming
|
bsd-3-clause
| -4,590,892,784,457,792,000
| 44.435294
| 121
| 0.667012
| false
| 4.027112
| false
| false
| false
|
RecipeML/Recipe
|
utils/partitionpy/progress.py
|
1
|
1067
|
# This Python file uses the following encoding: utf-8
# coding: utf-8
import sys
# Print iterations progress
def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 20):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
barLength - Optional : character length of bar (Int)
"""
formatStr = "{0:." + str(decimals) + "f}"
percent = formatStr.format(100 * (iteration / float(total)))
filledLength = int(round(barLength * iteration / float(total)))
bar = '█' * filledLength + '-' * (barLength - filledLength)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
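# Minimal usage sketch (hypothetical loop, not part of the original module):
#   items = list(range(57))
#   for i, _ in enumerate(items):
#       # ... do some work ...
#       printProgress(i + 1, len(items), prefix='Progress:', suffix='Complete', barLength=40)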
|
gpl-3.0
| 9,108,792,661,962,851,000
| 41.6
| 93
| 0.612207
| false
| 3.790036
| false
| false
| false
|
4degrees/segue
|
source/segue/frontend/exporter.py
|
1
|
5730
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import traceback
from PySide import QtGui
from .selector import SelectorWidget
from .options import OptionsWidget
from .worker import Worker
from ..backend.processor.foreground import ForegroundProcessor
class ExporterWidget(QtGui.QWidget):
'''Manage exporting.'''
def __init__(self, host, processors, parent=None):
'''Initialise with *host* application and *parent*.
*processors* should be a list of
:py:class:`~segue.backend.processor.base.Processor` instances to make
available as processor options.
'''
super(ExporterWidget, self).__init__(parent=parent)
self._host = None
self._processors = None
self.build()
self.post_build()
self.host = host
self.set_processors(processors)
# Auto-add any current selection.
items = self.host.get_selection()
self.selector_widget.add(items)
@property
def host(self):
'''Return current host application.'''
return self._host
@host.setter
def host(self, host):
'''Set host application to *host*.'''
self._host = host
self.options_widget.host = host
self.selector_widget.host = host
def get_processors(self):
'''Return current processors.'''
return self.options_widget.get_processors()
def set_processors(self, processors):
'''Set processors clearing any existing ones.'''
self.options_widget.set_processors(processors)
def build(self):
'''Build and layout the interface.'''
self.setLayout(QtGui.QVBoxLayout())
self.selector_widget = SelectorWidget(host=self.host)
self.selector_widget.setFrameStyle(
QtGui.QFrame.StyledPanel
)
self.layout().addWidget(self.selector_widget)
self.options_widget = OptionsWidget(host=self.host)
self.options_widget.setFrameStyle(
QtGui.QFrame.StyledPanel
)
self.layout().addWidget(self.options_widget)
self.export_button = QtGui.QPushButton('Export')
self.layout().addWidget(self.export_button)
self.progress_bar = QtGui.QProgressBar()
self.progress_bar.setTextVisible(False)
self.layout().addWidget(self.progress_bar)
self.progress_bar.hide()
def post_build(self):
'''Perform post-build operations.'''
self.setWindowTitle('Segue Exporter')
self.selector_widget.added.connect(self.validate)
self.selector_widget.removed.connect(self.validate)
self.options_widget.processor_widget.currentIndexChanged.connect(
self.validate
)
self.options_widget.target_widget.textChanged.connect(self.validate)
self.export_button.clicked.connect(self.export)
self.validate()
def validate(self, *args, **kw):
'''Validate options and update UI state.'''
self.export_button.setEnabled(False)
if not self.selector_widget.items():
return
processor = self.options_widget.processor_widget.itemData(
self.options_widget.processor_widget.currentIndex()
)
if processor is None:
return
target = self.options_widget.target_widget.text()
if not target:
return
self.export_button.setEnabled(True)
def export(self):
'''Perform export according to set options.'''
processor = self.options_widget.processor_widget.itemData(
self.options_widget.processor_widget.currentIndex()
)
self.export_button.hide()
self.progress_bar.setRange(0, 0) # Indeterminate
self.progress_bar.show()
options = {
'source': None,
'selection': self.selector_widget.items(),
'target': self.options_widget.target_widget.text(),
'start': self.options_widget.start_frame_widget.value(),
'stop': self.options_widget.stop_frame_widget.value(),
'step': self.options_widget.step_frame_widget.value(),
'rest': self.options_widget.rest_frame_widget.value()
}
# TODO: Can this be decoupled?
if not isinstance(processor, ForegroundProcessor):
temporary_file = self.host.save()
options['source'] = temporary_file
command = [self.host.save_package, None, options]
try:
worker = Worker(processor.process, command)
worker.start()
while worker.isRunning():
app = QtGui.QApplication.instance()
app.processEvents()
if worker.error:
raise worker.error[1], None, worker.error[2]
except Exception as error:
traceback.print_exc()
QtGui.QMessageBox.critical(
self,
'Process failed',
'Could not export selection!'
'\n{0}'.format(error)
)
else:
QtGui.QMessageBox.information(
self,
'Process completed',
'Selection exported successfully!'
'\n{0}'.format(worker.result or '')
)
finally:
self.progress_bar.setMaximum(1)
self.progress_bar.reset()
self.progress_bar.hide()
self.export_button.show()
|
apache-2.0
| 1,471,412,097,358,342,700
| 31.556818
| 77
| 0.578883
| false
| 4.547619
| false
| false
| false
|
ooici/coi-services
|
ion/agents/data/test/test_dsa_moas_ctdgv.py
|
1
|
4167
|
#!/usr/bin/env python
"""
@package ion.agents.data.test.test_moas_ctdgv
@file ion/agents/data/test_moas_ctdgv
@author Bill French
@brief End to end testing for moas ctdgv
"""
__author__ = 'Bill French'
import gevent
import os
from pyon.public import log
from nose.plugins.attrib import attr
from ion.agents.data.test.dataset_test import DatasetAgentTestCase
from ion.services.dm.test.dm_test_case import breakpoint
from pyon.agent.agent import ResourceAgentState
import unittest
###############################################################################
# Global constants.
###############################################################################
@attr('INT', group='sa')
class GliderCTDTest(DatasetAgentTestCase):
"""
    Verify dataset agent can harvest data files, parse the data, publish,
ingest and retrieve stored data.
"""
def setUp(self):
self.test_config.initialize(
instrument_device_name = 'CTDGV-01',
preload_scenario= 'GENG,CTDGV',
stream_name= 'ggldr_ctdgv_delayed',
# Uncomment this line to load driver from a local repository
#mi_repo = '/Users/wfrench/Workspace/code/wfrench/marine-integrations'
)
super(GliderCTDTest, self).setUp()
def test_parse(self):
"""
Verify file import and connection ids
"""
expected_state = {'version': 0.1,
'unit_363_2013_245_10_6.mrg': {'ingested': True, 'parser_state': {'position': 1852}, 'file_checksum': '31b4a31fb4a192ce67c89dfe32b72813', 'file_mod_date': 1391110766.0, 'file_size': 1852},
'unit_363_2013_245_6_6.mrg': {'ingested': True, 'parser_state': {'position': 5599}, 'file_checksum': 'e14ee0749eceb928390ed007b7d7ebd1', 'file_mod_date': 1391110815.0, 'file_size': 5914}}
self.assert_initialize()
self.assert_driver_state(None)
self.create_sample_data("moas_ctdgv/file_1.mrg", "unit_363_2013_245_6_6.mrg")
self.create_sample_data("moas_ctdgv/file_2.mrg", "unit_363_2013_245_10_6.mrg")
granules = self.get_samples(self.test_config.stream_name, 4)
self.assert_data_values(granules, 'moas_ctdgv/merged.result.yml')
self.assert_driver_state(expected_state)
self.assert_agent_state_after_restart()
self.assert_sample_queue_size(self.test_config.stream_name, 0)
def test_large_file(self):
"""
Verify a large file import with no buffering
"""
self.assert_initialize()
self.create_sample_data("moas_ctdgv/unit_363_2013_199_0_0.mrg", "unit_363_2013_199_0_0.mrg")
gevent.sleep(10)
self.assert_sample_queue_size(self.test_config.stream_name, 1)
self.create_sample_data("moas_ctdgv/unit_363_2013_199_1_0.mrg", "unit_363_2013_199_1_0.mrg")
gevent.sleep(10)
self.assert_sample_queue_size(self.test_config.stream_name, 2)
self.create_sample_data("moas_ctdgv/unit_363_2013_245_6_6.mrg", "unit_363_2013_245_6_6.mrg")
self.get_samples(self.test_config.stream_name, 171, 180)
self.assert_sample_queue_size(self.test_config.stream_name, 0)
def test_capabilities(self):
self.assert_agent_capabilities()
def test_lost_connection(self):
"""
Test a parser exception and verify that the lost connection logic works
"""
self.assert_initialize()
path = self.create_sample_data("moas_ctdgv/file_1.mrg", "unit_363_2013_245_6_6.mrg")
os.chmod(path, 0000)
self.assert_state_change(ResourceAgentState.LOST_CONNECTION)
# Sleep long enough to let the first reconnect happen and fail again.
gevent.sleep(65)
# Resolve the issue
os.chmod(path, 0755)
# We should transition back to streaming and stay there.
self.assert_state_change(ResourceAgentState.STREAMING, timeout=180)
self.create_sample_data("moas_ctdgv/file_2.mrg", "unit_363_2013_245_10_6.mrg")
granules = self.get_samples(self.test_config.stream_name, 4, timeout=30)
self.assert_data_values(granules, 'moas_ctdgv/merged.result.yml')
|
bsd-2-clause
| -7,171,835,464,924,616,000
| 36.205357
| 214
| 0.631629
| false
| 3.260563
| true
| false
| false
|
ibab/tensorprob
|
tensorprob/distribution.py
|
1
|
4055
|
from collections import Iterable
import numpy as np
import tensorflow as tf
from . import config
from . import utilities
from .model import Description, Model, ModelError, Region
class DistributionError(Exception):
pass
def _parse_bounds(num_dimensions, lower, upper, bounds):
def _parse_bounds_1D(lower, upper, bounds):
if not bounds:
lower = -np.inf if lower is None else lower
upper = np.inf if upper is None else upper
return [Region(lower, upper)]
bounds = [Region(*b) for b in bounds]
if None in utilities.flatten(bounds):
raise ValueError
return bounds
try:
if num_dimensions == len(bounds) and isinstance(bounds[0][0], Iterable):
bounds = [_parse_bounds_1D(lower, upper, b) for b in bounds]
else:
# Set the same bounds for all variables
bounds = [_parse_bounds_1D(lower, upper, bounds)]*num_dimensions
except Exception:
raise ValueError("Failed to parse 'bounds'")
else:
return bounds
def Distribution(distribution_init):
def f(*args, **kwargs):
# Why legacy Python, why...
lower = kwargs.get('lower')
upper = kwargs.get('upper')
bounds = kwargs.get('bounds', [])
name = kwargs.get('name')
if Model is None or tf.get_default_graph() is not Model.current_model._model_graph:
raise ModelError(
"Can't define distributions outside of a model block")
if bounds and (lower is not None or upper is not None):
            raise DistributionError(
                "'lower'/'upper' can't be used in combination with 'bounds'")
name = name or utilities.generate_name(distribution_init)
Distribution.logp = None
Distribution.integral = None
Distribution.bounds = lambda ndim: _parse_bounds(ndim, lower, upper, bounds)
variables = distribution_init(*args, name=name)
# One dimensional distributions return a value, convert it to a tuple
if not isinstance(variables, tuple):
variables = (variables,)
# Ensure the distribution has set the required properties
if Distribution.logp is None:
raise DistributionError('Distributions must define logp')
if Distribution.integral is None:
raise NotImplementedError('Numeric integrals are not yet supported')
# Parse the bounds to be a list of lists of Regions
bounds = Distribution.bounds(len(variables))
# Force logp to negative infinity when outside the allowed bounds
for var, bound in zip(variables, bounds):
conditions = []
for l, u in bound:
lower_is_neg_inf = not isinstance(l, tf.Tensor) and np.isneginf(l)
upper_is_pos_inf = not isinstance(u, tf.Tensor) and np.isposinf(u)
if not lower_is_neg_inf and upper_is_pos_inf:
conditions.append(tf.greater(var, l))
elif lower_is_neg_inf and not upper_is_pos_inf:
conditions.append(tf.less(var, u))
elif not (lower_is_neg_inf or upper_is_pos_inf):
conditions.append(tf.logical_and(tf.greater(var, l), tf.less(var, u)))
if len(conditions) > 0:
is_inside_bounds = conditions[0]
for condition in conditions[1:]:
is_inside_bounds = tf.logical_or(is_inside_bounds, condition)
Distribution.logp = tf.select(
is_inside_bounds,
Distribution.logp,
tf.fill(tf.shape(var), config.dtype(-np.inf))
)
# Add the new variables to the model description
for variable, bound in zip(variables, bounds):
Model.current_model._description[variable] = Description(
Distribution.logp, Distribution.integral, bound
)
return variable if len(variables) == 1 else variables
return f
|
mit
| -929,911,865,791,577,300
| 36.546296
| 91
| 0.603699
| false
| 4.398048
| false
| false
| false
|
uq-eresearch/uqam
|
subcollections/models.py
|
1
|
13830
|
from django.db import models
from datetime import datetime
from django.db.models.signals import post_save, post_delete
from cat.models import Category
from location.models import GlobalRegion, Country, StateProvince, RegionDistrict, Locality
from django.utils.xmlutils import SimplerXMLGenerator
from django.core.urlresolvers import reverse
from utils.utils import get_site_url
import StringIO
from django.contrib.sites.models import Site
from django.utils.feedgenerator import rfc3339_date
import logging
import requests
logger = logging.getLogger(__name__)
class Collection(models.Model):
title = models.CharField(max_length=120)
description = models.TextField()
author = models.ForeignKey('auth.User', null=True, blank=True)
items = models.ManyToManyField('cat.MuseumObject',
related_name='collections', blank=True)
is_public = models.BooleanField(
help_text="Should collection be visible to the public")
is_syndicated = models.BooleanField(
help_text="Should collection be sent for syndication")
rights = models.TextField(
help_text="Information about rights held in and over the entity")
access_rights = models.TextField(
help_text="Information about who can access the entity, "
"including access restrictions based on privacy, security, "
"or other policies.")
updated = models.DateTimeField(auto_now=True, editable=False,
help_text="Date the collection was last edited")
created = models.DateTimeField(auto_now_add=True, editable=False,
help_text="Date the collection was initially created")
edit_url = models.URLField(verify_exists=False, blank=True, editable=False,
help_text="Remotely assigned URL for updating syndicated data")
last_published = models.DateTimeField(blank=True, null=True,
editable=False, help_text="Date the collection was last published,"
" or edited while published")
date_published = models.DateTimeField(blank=True, null=True,
editable=False,
help_text="Date the collection was first published")
last_syndicated = models.DateTimeField(blank=True, null=True,
editable=False,
help_text="Date the collection was sent for syndication")
syndication_result = models.TextField(blank=True, editable=False,
help_text="Result from last syndication submission")
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
if self.is_public:
if not self.date_published:
self.date_published = datetime.now()
self.last_published = datetime.now()
super(Collection, self).save(*args, **kwargs)
@models.permalink
def get_absolute_url(self):
return ('collection_detail', [str(self.id)])
def get_atom_url(self):
return reverse('collection_atom_detail', args=[self.id])
@staticmethod
def entry_attributes():
return {u"xmlns": u"http://www.w3.org/2005/Atom",
u"xmlns:rdfa": u"http://www.w3.org/ns/rdfa#",
u"xmlns:georss": u"http://www.georss.org/georss"}
def get_categories(self):
"""Queryset of categories of items in collection"""
items = self.items.all()
return Category.objects.filter(
museumobject__in=items).distinct()
def get_places(self):
"""
Get all places referenced by items in this collection
Returns a list of each place, ignoring places with blank names
and ignoring places with duplicate names, even if they are different 'types'
of place.
        Ordered from most all-encompassing to most detailed.
"""
items = self.items.all()
names_set = set()
places = []
for place_type in (Locality, RegionDistrict, StateProvince, Country, GlobalRegion):
for place in place_type.objects.filter(museumobject__in=items).distinct():
if place.name and place.name not in names_set:
names_set.add(place.name)
places.append(place)
places.reverse()
return places
def public_items(self):
"""
Return a queryset of all public items
"""
return self.items.filter(public=True)
def as_atom(self, encoding='utf-8'):
"""
Serialise to an Atom format
Uses the profile from http://dataspace.metadata.net/doc/atom
"""
syndication = Syndication.objects.get(id=1)
output = StringIO.StringIO()
site = Site.objects.get(id=1)
link = get_site_url(site, self.get_absolute_url())
site_id = get_site_url(site, "/")
handler = SimplerXMLGenerator(output, encoding)
handler.startDocument()
handler.startElement(u"entry", self.entry_attributes())
handler.addQuickElement(u"id", link)
handler.addQuickElement(u"title", self.title)
handler.addQuickElement(u'content', self.description, {'type': 'html'})
if self.date_published:
handler.addQuickElement(u"published", rfc3339_date(self.date_published).decode('utf-8'))
if self.last_published:
handler.addQuickElement(u"updated", rfc3339_date(self.last_published).decode('utf-8'))
handler.addQuickElement(u"link", attrs={
u'href': 'http://purl.org/dc/dcmitype/Collection',
u'rel': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type',
u'title': 'Collection'})
handler.addQuickElement(u"rights", self.rights)
handler.startElement(u"rdfa:meta",
{u'property': u'http://purl.org/dc/terms/accessRights',
u'content': self.access_rights})
handler.endElement(u"rdfa:meta")
handler.addQuickElement(u'link', attrs={
u'rel': u'http://purl.org/dc/terms/publisher',
u'href': syndication.curator_href,
u'title': syndication.curator_name
})
handler.startElement(u"source", {})
handler.addQuickElement(u"id", site_id)
handler.addQuickElement(u"title", site.name)
handler.startElement(u"author", {})
handler.addQuickElement(u"name", self.author.get_full_name())
handler.addQuickElement(u"email", self.author.email)
handler.endElement(u"author")
handler.endElement(u"source")
handler.startElement(u"link",
{u"rel": "http://xmlns.com/foaf/0.1/page",
u"href": link})
handler.endElement(u"link")
handler.addQuickElement(u'category', attrs={
u'term': u'http://purl.org/asc/1297.0/2008/for/1601',
u'scheme': u'http://purl.org/asc/1297.0/2008/for/',
u'label': u'1601 Anthropology'
})
# Published control
draft = u'no' if self.is_public else u'yes'
handler.startElement(u'app:control',
{u'xmlns:app': u'http://www.w3.org/2007/app'})
handler.addQuickElement(u'app:draft', draft)
handler.endElement(u'app:control')
self._add_categories(handler, site)
self._add_spatial(handler, site)
handler.endElement(u"entry")
return output.getvalue()
def _add_categories(self, handler, site):
for category in self.get_categories():
# TODO: add this back when dataspace is fixed
# cat_url = get_site_url(site, category.get_absolute_url())
handler.addQuickElement(u'category', attrs={
# u'term': cat_url,
u'term': unicode(category.name)
})
def _add_spatial(self, handler, site):
for place in self.get_places():
place_url = get_site_url(site, place.get_absolute_url())
handler.addQuickElement(u'link', attrs={
u'rel': u'http://purl.org/dc/terms/spatial',
u'href': place_url,
u'title': unicode(place)
})
if place.latitude is not None:
handler.addQuickElement(u'georss:point',
unicode(place.latitude) + u" " + unicode(place.longitude)
)
def update_after_syndication(self, response):
"""
Update collection with date and syndication edit url
"""
        self.syndication_result = response.text
        self.last_syndicated = datetime.now()
        self.edit_url = self.find_edit_url(response.text)
        self.save()
@staticmethod
def find_edit_url(atom_string):
from xml.etree import ElementTree
tree = ElementTree.fromstring(atom_string)
alllinks = tree.findall('{http://www.w3.org/2005/Atom}link')
return [c.get('href') for c in alllinks if c.get('rel') == 'edit'][0]
# The following is much nicer, but only works in python 2.7+ *sadface*
# return tree.find('{http://www.w3.org/2005/Atom}link[@rel="edit"]').get('href')
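        # Minimal usage sketch (hypothetical Atom snippet, not from the original code):
        #   atom = ('<entry xmlns="http://www.w3.org/2005/Atom">'
        #           '<link rel="edit" href="http://example.org/collections/1"/></entry>')
        #   Collection.find_edit_url(atom)  ->  'http://example.org/collections/1'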
class Syndication(models.Model):
remote_url = models.CharField(max_length=300)
username = models.CharField(max_length=100, blank=True)
password = models.CharField(max_length=100, blank=True)
curator_href = models.CharField(max_length=200, blank=True)
curator_name = models.CharField(max_length=200, blank=True)
# Constants
content_type = "application/atom+xml"
def __init__(self, *args, **kwargs):
super(Syndication, self).__init__(*args, **kwargs)
self.login_url = self.remote_url + "login"
self.collections_url = self.remote_url + "collections"
self.login_data = {'username': self.username,
'password': self.password}
self.headers = {'content-type': self.content_type}
def __unicode__(self):
return self.remote_url
def _login(self):
"""
Login to syndication server
"""
s = requests.session()
self.session = s
login = s.post(self.login_url, data=self.login_data)
if login.status_code == 200:
return True
else:
logger.error("Error logging in to syndication server %s",
self.login_url)
return False
def syndicate_collection(self, collection):
"""
Submit the collection to the syndication server
"""
if self._login():
if collection.edit_url == '':
self._post_new(collection)
else:
self._update(collection)
else:
collection.syndication_result = 'Failed: unable to login to server'
collection.save()
def _post_new(self, collection):
session = self.session
response = session.post(self.collections_url,
data=collection.as_atom(), headers=self.headers)
if response.status_code == 201:
collection.update_after_syndication(response)
else:
# record failure
logger.error('Collection (id=%s) syndication POST (to %s) failed: %s',
collection.id, self.collections_url, response.text)
collection.syndication_result = response.text
collection.save()
def _update(self, collection):
session = self.session
response = session.put(collection.edit_url,
data=collection.as_atom(), headers=self.headers)
if response.status_code == 200:
collection.update_after_syndication(response)
else:
# record failure
logger.error('Collection (id=%s) syndication PUT (to %s) failed: %s',
collection.id, collection.edit_url, response.text)
collection.syndication_result = response.text
collection.save()
def delete_collection(self, collection):
"""
Remove a collection from the syndication server
"""
if collection.edit_url == '':
logger.error('Unable to remove un-syndicated collection (id=%s)',
collection.id)
return
if self._login():
session = self.session
response = session.delete(collection.edit_url)
if response.status_code == 200:
collection.edit_url = ''
logger.info('Removed Collection (id=%s) from syndication server', collection.id)
else:
logger.error('Unable to remove collection (id=%s) from syndication server: %s %s',
response.status_code, response.text)
else:
logger.error('Unable to login to syndication server to remove collection (id=%s)',
collection.id)
def queue_for_syndication(instance, **kwargs):
collection = instance
# Collection is updated with new dates and edit urls
# we need to disconnect signal handler to prevent a loop
post_save.disconnect(queue_for_syndication, sender=Collection)
if collection.is_syndicated:
syndication = Syndication.objects.get(id=1)
try:
syndication.syndicate_collection(collection)
except:
logger.exception("Error syndicating collection (id=%s)", collection.id)
else:
if collection.edit_url != '':
syndication = Syndication.objects.get(id=1)
syndication.delete_collection(collection)
collection.save()
    post_save.connect(queue_for_syndication, sender=Collection)
post_save.connect(queue_for_syndication, sender=Collection)
def delete_from_syndication(instance, **kwargs):
collection = instance
if collection.is_syndicated:
syndication = Syndication.objects.get(id=1)
syndication.delete_collection(collection)
post_delete.connect(delete_from_syndication, sender=Collection)
|
bsd-3-clause
| 478,843,069,451,846,800
| 36.378378
| 100
| 0.617137
| false
| 4.053341
| false
| false
| false
|
hasgeek/funnel
|
migrations/versions/d0097ec29880_fix_membership_granted_by.py
|
1
|
1147
|
"""Fix membership granted_by.
Revision ID: d0097ec29880
Revises: bd465803af3a
Create Date: 2021-04-22 05:20:50.774828
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd0097ec29880'
down_revision = 'bd465803af3a'
branch_labels = None
depends_on = None
def upgrade():
op.alter_column(
'commentset_membership',
'granted_by_id',
existing_type=sa.INTEGER(),
nullable=False,
)
op.alter_column(
'proposal_membership',
'granted_by_id',
existing_type=sa.INTEGER(),
nullable=False,
)
op.alter_column(
'site_membership', 'granted_by_id', existing_type=sa.INTEGER(), nullable=False
)
def downgrade():
op.alter_column(
'site_membership', 'granted_by_id', existing_type=sa.INTEGER(), nullable=True
)
op.alter_column(
'proposal_membership',
'granted_by_id',
existing_type=sa.INTEGER(),
nullable=True,
)
op.alter_column(
'commentset_membership',
'granted_by_id',
existing_type=sa.INTEGER(),
nullable=True,
)
|
agpl-3.0
| 5,611,952,987,367,919,000
| 21.057692
| 86
| 0.617262
| false
| 3.344023
| false
| false
| false
|
nextgis/ngw_external_api_python
|
core/ngw_resource.py
|
1
|
5940
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
NextGIS WEB API
-------------------
begin : 2014-11-19
git sha : $Format:%H$
copyright : (C) 2014 by NextGIS
email : info@nextgis.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from os import path
import urllib.parse
from ..utils import ICONS_DIR, log
API_RESOURCE_URL = lambda res_id: '/api/resource/%d' % res_id
API_COLLECTION_URL = '/api/resource/'
RESOURCE_URL = lambda res_id: '/resource/%d' % res_id
API_LAYER_EXTENT = lambda res_id: '/api/resource/%d/extent' % res_id
class Wrapper():
def __init__(self, **params):
self.__dict__.update(params)
DICT_TO_OBJ = lambda d: Wrapper(**d)
LIST_DICT_TO_LIST_OBJ = lambda l: [Wrapper(**el) for el in l]
class NGWResource():
type_id = 'resource'
icon_path = path.join(ICONS_DIR, 'resource.svg')
type_title = 'NGW Resource'
# STATIC
@classmethod
def receive_resource_obj(cls, ngw_con, res_id):
"""
:rtype : json obj
"""
return ngw_con.get(API_RESOURCE_URL(res_id))
@classmethod
def receive_resource_children(cls, ngw_con, res_id):
"""
:rtype : json obj
"""
return ngw_con.get("%s?parent=%s" % (API_COLLECTION_URL, res_id))
@classmethod
def delete_resource(cls, ngw_resource):
ngw_con = ngw_resource._res_factory.connection
url = API_RESOURCE_URL(ngw_resource.common.id)
ngw_con.delete(url)
# INSTANCE
def __init__(self, resource_factory, resource_json, children_count=None):
"""
Init resource from json representation
        :param resource_factory: factory used to resolve related resources
        :param resource_json: json representation of the resource
"""
self._res_factory = resource_factory
self._json = resource_json
self._construct()
self.children_count = children_count
def set_children_count(self, children_count):
self.children_count = children_count
def _construct(self):
"""
Construct resource from self._json
Can be overridden in a derived class
"""
#resource
self.common = DICT_TO_OBJ(self._json['resource'])
if self.common.parent:
self.common.parent = DICT_TO_OBJ(self.common.parent)
if self.common.owner_user:
self.common.owner_user = DICT_TO_OBJ(self.common.owner_user)
#resmeta
if 'resmeta' in self._json:
self.metadata = DICT_TO_OBJ(self._json['resmeta'])
def get_parent(self):
if self.common.parent:
return self._res_factory.get_resource(self.common.parent.id)
else:
return None
def get_children(self):
children = []
if self.common.children:
children_json = NGWResource.receive_resource_children(self._res_factory.connection, self.common.id)
for child_json in children_json:
children.append(self._res_factory.get_resource_by_json(child_json))
return children
def get_absolute_url(self):
return self._res_factory.connection.server_url + RESOURCE_URL(self.common.id)
def get_absolute_api_url(self):
return self._res_factory.connection.server_url + API_RESOURCE_URL(self.common.id)
# def get_absolute_url_with_auth(self):
# creds = self._res_factory.connection.get_auth()
# return self._res_factory.connection.server_url.replace('://', '://%s:%s@' % creds) + RESOURCE_URL(self.common.id)
def get_absolute_api_url_with_auth(self):
creds = self._res_factory.connection.get_auth()
#url = self._res_factory.connection.server_url.replace('://', '://%s:%s@' % creds) + API_RESOURCE_URL(self.common.id)
url = self._res_factory.connection.server_url.replace('://', '://{login}:{password}@') + API_RESOURCE_URL(self.common.id)
url = url.format(login=creds[0], password=urllib.parse.quote_plus(creds[1]))
return url
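        # e.g. (hypothetical values) with server_url 'https://ngw.example.com',
        # login 'admin' and password 'p@ss', this returns
        # 'https://admin:p%40ss@ngw.example.com/api/resource/<id>'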
def get_relative_url(self):
return RESOURCE_URL(self.common.id)
def get_relative_api_url(self):
return API_RESOURCE_URL(self.common.id)
@classmethod
def get_api_collection_url(cls):
return API_COLLECTION_URL
def change_name(self, name):
new_name = self.generate_unique_child_name(name)
params = dict(
resource=dict(
display_name=new_name,
),
)
connection = self._res_factory.connection
url = self.get_relative_api_url()
connection.put(url, params=params)
self.update()
def update(self):
self._json = self.receive_resource_obj(
self._res_factory.connection,
self.common.id
)
self._construct()
children = self.get_children()
self.set_children_count(len(children))
def generate_unique_child_name(self, name):
chd_names = [ch.common.display_name for ch in self.get_children()]
new_name = name
id = 1
if new_name in chd_names:
while(new_name in chd_names):
new_name = name + "(%d)" % id
id += 1
return new_name
|
gpl-2.0
| 3,923,616,607,574,778,000
| 33.137931
| 129
| 0.543098
| false
| 3.907895
| false
| false
| false
|
heyglen/netobj
|
nettool/host.py
|
1
|
2534
|
# -*- coding: utf-8 -*-
from nettool.hostname import Hostname
from nettool.hostname_list import HostnameList
from nettool.utilities import raise_type_exception
class Host(HostnameList):
""" Represents all the names and IPs referring to the same host """
def __init__(self, value, ip=None):
super(Host, self).__init__()
self._add(value, ip)
def __eq__(self, value):
return self.__contains__(value)
def _update_hostname_attributes(self, hostname):
""" Update exisiting hostnames without an attribute with the new hosts' attribute """
for attribute in ('domain', 'ip'):
for self_host in self._host_entries:
if hostname.name == self_host.name:
if not getattr(self_host, attribute):
setattr(self_host, attribute, getattr(hostname, attribute))
def _add_hostname_new_ip(self, hostname):
for entry in self._host_entries:
if entry.fqdn == hostname.fqdn and entry.ip != hostname.ip:
self._append(hostname)
return
def _add_ip_new_hostname(self, hostname):
for entry in self._host_entries:
if entry.ip == hostname.ip and entry.name != hostname.name:
self._append(hostname)
return
def add(self, value, ip=None):
""" Merges a value with existing host entry values """
if isinstance(value, basestring):
value = Hostname(value, ip=ip)
if not isinstance(value, Hostname):
raise_type_exception(value, (Hostname, ), 'add')
if value not in self._host_entries:
raise ValueError('Host {} does not belong to {}'.format(value, self))
self._update_hostname_attributes(value)
for entry in self._host_entries:
if value.fqdn == entry.fqdn:
if entry.ip == value.ip:
# Full match found. Do nothing
return
self._add_hostname_new_ip(value)
self._add_ip_new_hostname(value)
@property
def display_hostname(self):
display = 'unknown'
for hostname in self._host_entries:
if display == 'unknown':
display = hostname.fqdn
            elif len(hostname.fqdn) > len(display):
display = hostname.fqdn
return display
def __str__(self):
return 'Host {}'.format(self.display_hostname)
def __repr__(self):
return '<Host {}>'.format(self.display_hostname)
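# Minimal usage sketch (hypothetical values; relies on the HostnameList merging
# behaviour described in the docstrings above):
#   host = Host('web01.example.com', ip='10.0.0.5')
#   host.add('web01', ip='10.0.0.5')   # merge an alias that shares the same IP
#   host.display_hostname              -> 'web01.example.com'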
|
mit
| 8,635,459,251,104,732,000
| 33.243243
| 93
| 0.579321
| false
| 4.339041
| false
| false
| false
|
Bam4d/neon
|
examples/imagenet_allcnn.py
|
1
|
4337
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
AllCNN style convnet on imagenet data.
Reference:
Striving for Simplicity: the All Convolutional Net `[Springenberg2014]`_
.. _[Springenberg2014]: http://arxiv.org/pdf/1412.6806.pdf
"""
from neon.util.argparser import NeonArgparser
from neon.backends import gen_backend
from neon.initializers import GlorotUniform
from neon.optimizers import GradientDescentMomentum, Schedule
from neon.layers import Conv, Dropout, Activation, Pooling, GeneralizedCost, DataTransform
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, Normalizer
from neon.models import Model
from neon.callbacks.callbacks import Callbacks
from neon.data import ImageLoader
# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--deconv', action='store_true',
help='save visualization data from deconvolution')
args = parser.parse_args()
# hyperparameters
batch_size = 64
# setup backend
be = gen_backend(backend=args.backend,
batch_size=batch_size,
rng_seed=args.rng_seed,
device_id=args.device_id,
datatype=args.datatype)
# setup data provider
img_set_options = dict(repo_dir=args.data_dir,
inner_size=224,
dtype=args.datatype,
subset_pct=100)
train = ImageLoader(set_name='train', **img_set_options)
test = ImageLoader(set_name='validation', do_transforms=False, **img_set_options)
relu = Rectlin()
init_uni = GlorotUniform()
# The parameters below are straight out of [Springenberg2014]
opt_gdm = GradientDescentMomentum(learning_rate=0.01,
schedule=Schedule(step_config=[10],
change=0.1),
momentum_coef=0.9, wdecay=.0005)
# set up model layers
layers = []
layers.append(DataTransform(transform=Normalizer(divisor=128.)))
layers.append(Conv((11, 11, 96), init=init_uni, activation=relu, strides=4, padding=1))
layers.append(Conv((1, 1, 96), init=init_uni, activation=relu, strides=1))
layers.append(Conv((3, 3, 96), init=init_uni, activation=relu, strides=2, padding=1)) # 54->27
layers.append(Conv((5, 5, 256), init=init_uni, activation=relu, strides=1)) # 27->23
layers.append(Conv((1, 1, 256), init=init_uni, activation=relu, strides=1))
layers.append(Conv((3, 3, 256), init=init_uni, activation=relu, strides=2, padding=1)) # 23->12
layers.append(Conv((3, 3, 384), init=init_uni, activation=relu, strides=1, padding=1))
layers.append(Conv((1, 1, 384), init=init_uni, activation=relu, strides=1))
layers.append(Conv((3, 3, 384), init=init_uni, activation=relu, strides=2, padding=1)) # 12->6
layers.append(Dropout(keep=0.5))
layers.append(Conv((3, 3, 1024), init=init_uni, activation=relu, strides=1, padding=1))
layers.append(Conv((1, 1, 1024), init=init_uni, activation=relu, strides=1))
layers.append(Conv((1, 1, 1000), init=init_uni, activation=relu, strides=1))
layers.append(Pooling(6, op='avg'))
layers.append(Activation(Softmax()))
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
mlp = Model(layers=layers)
if args.model_file:
import os
assert os.path.exists(args.model_file), '%s not found' % args.model_file
mlp.load_params(args.model_file)
# configure callbacks
callbacks = Callbacks(mlp, train, eval_set=test, **args.callback_args)
if args.deconv:
callbacks.add_deconv_callback(train, test)
mlp.fit(train, optimizer=opt_gdm, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
|
apache-2.0
| -934,133,652,730,482,200
| 38.788991
| 98
| 0.669126
| false
| 3.450278
| false
| false
| false
|
lukaszb/django-richtemplates
|
example_project/examples/admin.py
|
1
|
1099
|
from django import forms
from django.contrib import admin
from examples.models import Task, Status, Project, Priority
from richtemplates.forms import RestructuredTextAreaField
class TaskFormAdmin(forms.ModelForm):
content = RestructuredTextAreaField()
class Meta:
model = Task
class TaskAdmin(admin.ModelAdmin):
list_displa = ['project', 'summary', 'created_at', 'author', 'edited_at',
'editor', 'status', 'priority']
list_filter = ['author', 'status', 'priority']
date_hierarchy = 'created_at'
save_on_top = True
search_fields = ['summary', 'content']
form = TaskFormAdmin
class StatusInline(admin.StackedInline):
model = Status
extra = 1
class PriorityInline(admin.StackedInline):
model = Priority
extra = 1
class ProjectAdmin(admin.ModelAdmin):
    list_display = ['id', 'name', 'author']
save_on_top = True
search_fields = ['name']
inlines = [StatusInline, PriorityInline]
admin.site.register(Task, TaskAdmin)
admin.site.register(Status)
admin.site.register(Project, ProjectAdmin)
admin.site.register(Priority)
|
bsd-3-clause
| 326,583,783,829,779,600
| 26.475
| 77
| 0.705187
| false
| 3.687919
| false
| false
| false
|
mcbor/adventofcode
|
2017/09a.py
|
1
|
4643
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
09a.py
~~~~~~
Advent of Code 2017 - Day 9: Stream Processing
Part One
A large stream blocks your path. According to the locals, it's not safe to
cross the stream at the moment because it's full of garbage. You look down
at the stream; rather than water, you discover that it's a stream of
characters.
You sit for a while and record part of the stream (your puzzle input).
The characters represent groups - sequences that begin with { and end
with }. Within a group, there are zero or more other things, separated by
commas: either another group or garbage. Since groups can contain other
groups, a } only closes the most-recently-opened unclosed group - that is,
they are nestable. Your puzzle input represents a single, large group which
itself contains many smaller ones.
Sometimes, instead of a group, you will find garbage. Garbage begins with
< and ends with >. Between those angle brackets, almost any character can
appear, including { and }. Within garbage, < has no special meaning.
In a futile attempt to clean up the garbage, some program has canceled
some of the characters within it using !: inside garbage, any character
that comes after ! should be ignored, including <, >, and even another !.
You don't see any characters that deviate from these rules. Outside
garbage, you only find well-formed groups, and garbage always terminates
according to the rules above.
Here are some self-contained pieces of garbage:
- <>, empty garbage.
- <random characters>, garbage containing random characters.
- <<<<>, because the extra < are ignored.
- <{!>}>, because the first > is canceled.
- <!!>, because the second ! is canceled, allowing the > to terminate the
garbage.
- <!!!>>, because the second ! and the first > are canceled.
- <{o"i!a,<{i<a>, which ends at the first >.
Here are some examples of whole streams and the number of groups they
contain:
- {}, 1 group.
- {{{}}}, 3 groups.
- {{},{}}, also 3 groups.
- {{{},{},{{}}}}, 6 groups.
- {<{},{},{{}}>}, 1 group (which itself contains garbage).
- {<a>,<a>,<a>,<a>}, 1 group.
- {{<a>},{<a>},{<a>},{<a>}}, 5 groups.
- {{<!>},{<!>},{<!>},{<a>}}, 2 groups (since all but the last > are
canceled).
Your goal is to find the total score for all groups in your input. Each
group is assigned a score which is one more than the score of the group
that immediately contains it. (The outermost group gets a score of 1.)
{}, score of 1.
{{{}}}, score of 1 + 2 + 3 = 6.
{{},{}}, score of 1 + 2 + 2 = 5.
{{{},{},{{}}}}, score of 1 + 2 + 3 + 3 + 3 + 4 = 16.
{<a>,<a>,<a>,<a>}, score of 1.
{{<ab>},{<ab>},{<ab>},{<ab>}}, score of 1 + 2 + 2 + 2 + 2 = 9.
{{<!!>},{<!!>},{<!!>},{<!!>}}, score of 1 + 2 + 2 + 2 + 2 = 9.
{{<a!>},{<a!>},{<a!>},{<ab>}}, score of 1 + 2 = 3.
What is the total score for all groups in your input?
:copyright: (c) 2017 by Martin Bor.
:license: MIT, see LICENSE for more details.
"""
import sys
def garbage(stream):
"""Skip over any garbage in the stream, properly handling escaped (!)
characters
:stream: stream of characters
"""
for c in stream:
if c == '!':
# escape, skip the next char
next(stream)
elif c == '>':
return
def group(stream, level):
"""Return total score of this subgroup
:stream: stream of character
:level: current level
:returns: total score of this subgroup
"""
score = level
for c in stream:
if c == '}':
return score
elif c == '<':
garbage(stream)
elif c == '{':
score += group(stream, level + 1)
return score
def solve(stream):
"""Total score for all groups in the stream.
:stream: stream of characters
:return: total score
>>> solve('{}')
1
>>> solve('{{{}}}')
6
>>> solve('{{},{}}')
5
>>> solve('{{{},{},{{}}}}')
16
>>> solve('{<a>,<a>,<a>,<a>}')
1
>>> solve('{{<ab>},{<ab>},{<ab>},{<ab>}}')
9
>>> solve('{{<!!>},{<!!>},{<!!>},{<!!>}}')
9
>>> solve('{{<a!>},{<a!>},{<a!>},{<ab>}}')
3
"""
return group(iter(stream), 0)
def main(argv):
if len(argv) == 2:
f = open(argv[1], 'r')
else:
sys.stderr.write('reading from stdin...\n')
f = sys.stdin
print(solve(f.read().strip()))
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
mit
| 3,018,516,245,006,827,000
| 29.748344
| 79
| 0.56149
| false
| 3.624512
| false
| false
| false
|
larsyencken/csvdiff
|
csvdiff/__init__.py
|
1
|
8525
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# __init__.py
# csvdiff
#
import sys
from typing.io import TextIO
import io
import click
from . import records, patch, error
__author__ = 'Lars Yencken'
__email__ = 'lars@yencken.org'
__version__ = '0.3.1'
# exit codes for the command-line
EXIT_SAME = 0
EXIT_DIFFERENT = 1
EXIT_ERROR = 2
def diff_files(from_file, to_file, index_columns, sep=',', ignored_columns=None):
"""
Diff two CSV files, returning the patch which transforms one into the
other.
"""
with open(from_file) as from_stream:
with open(to_file) as to_stream:
from_records = records.load(from_stream, sep=sep)
to_records = records.load(to_stream, sep=sep)
return patch.create(from_records, to_records, index_columns,
ignore_columns=ignored_columns)
def diff_records(from_records, to_records, index_columns):
"""
Diff two sequences of dictionary records, returning the patch which
transforms one into the other.
"""
return patch.create(from_records, to_records, index_columns)
def patch_file(patch_stream: TextIO, fromcsv_stream: TextIO, tocsv_stream: TextIO,
strict: bool = True, sep: str = ','):
"""
Apply the patch to the source CSV file, and save the result to the target
file.
"""
diff = patch.load(patch_stream)
from_records = records.load(fromcsv_stream, sep=sep)
to_records = patch.apply(diff, from_records, strict=strict)
# what order should the columns be in?
if to_records:
# have data, use a nice ordering
all_columns = to_records[0].keys()
index_columns = diff['_index']
fieldnames = _nice_fieldnames(all_columns, index_columns)
else:
# no data, use the original order
fieldnames = from_records.fieldnames
records.save(to_records, fieldnames, tocsv_stream)
def patch_records(diff, from_records, strict=True):
"""
Apply the patch to the sequence of records, returning the transformed
records.
"""
return patch.apply(diff, from_records, strict=strict)
def _nice_fieldnames(all_columns, index_columns):
"Indexes on the left, other fields in alphabetical order on the right."
non_index_columns = set(all_columns).difference(index_columns)
return index_columns + sorted(non_index_columns)
class CSVType(click.ParamType):
name = 'csv'
def convert(self, value, param, ctx):
if isinstance(value, bytes):
try:
enc = getattr(sys.stdin, 'encoding', None)
if enc is not None:
value = value.decode(enc)
except UnicodeError:
try:
value = value.decode(sys.getfilesystemencoding())
except UnicodeError:
value = value.decode('utf-8', 'replace')
return value.split(',')
return value.split(',')
def __repr__(self):
return 'CSV'
@click.command()
@click.argument('index_columns', type=CSVType())
@click.argument('from_csv', type=click.Path(exists=True))
@click.argument('to_csv', type=click.Path(exists=True))
@click.option('--style',
type=click.Choice(['compact', 'pretty', 'summary']),
default='compact',
help=('Instead of the default compact output, pretty-print '
'or give a summary instead'))
@click.option('--output', '-o', type=click.Path(),
help='Output to a file instead of stdout')
@click.option('--quiet', '-q', is_flag=True,
help="Don't output anything, just use exit codes")
@click.option('--sep', default=',',
help='Separator to use between fields [default: comma]')
@click.option('--ignore-columns', '-i', type=CSVType(),
              help='A comma-separated list of columns to ignore from the comparison')
@click.option('--significance', type=int,
help='Ignore numeric changes less than this number of significant figures')
def csvdiff_cmd(index_columns, from_csv, to_csv, style=None, output=None,
sep=',', quiet=False, ignore_columns=None, significance=None):
"""
Compare two csv files to see what rows differ between them. The files
are each expected to have a header row, and for each row to be uniquely
identified by one or more indexing columns.
"""
if ignore_columns is not None:
for i in ignore_columns:
if i in index_columns:
error.abort("You can't ignore an index column")
ostream = (open(output, 'w') if output
else io.StringIO() if quiet
else sys.stdout)
try:
if style == 'summary':
_diff_and_summarize(from_csv, to_csv, index_columns, ostream,
sep=sep, ignored_columns=ignore_columns,
significance=significance)
else:
compact = (style == 'compact')
_diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
compact=compact, sep=sep, ignored_columns=ignore_columns,
significance=significance)
except records.InvalidKeyError as e:
error.abort(e.args[0])
finally:
ostream.close()
def _diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
compact=False, sep=',', ignored_columns=None,
significance=None):
diff = diff_files(from_csv, to_csv, index_columns, sep=sep, ignored_columns=ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
patch.save(diff, ostream, compact=compact)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
def _diff_and_summarize(from_csv, to_csv, index_columns, stream=sys.stdout,
sep=',', ignored_columns=None, significance=None):
"""
Print a summary of the difference between the two files.
"""
from_records = list(records.load(from_csv, sep=sep))
to_records = records.load(to_csv, sep=sep)
diff = patch.create(from_records, to_records, index_columns, ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
_summarize_diff(diff, len(from_records), stream=stream)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
def _summarize_diff(diff, orig_size, stream=sys.stdout):
if orig_size == 0:
# slightly arbitrary when the original data was empty
orig_size = 1
n_removed = len(diff['removed'])
n_added = len(diff['added'])
n_changed = len(diff['changed'])
if n_removed or n_added or n_changed:
print(u'%d rows removed (%.01f%%)' % (
n_removed, 100 * n_removed / orig_size
), file=stream)
print(u'%d rows added (%.01f%%)' % (
n_added, 100 * n_added / orig_size
), file=stream)
print(u'%d rows changed (%.01f%%)' % (
n_changed, 100 * n_changed / orig_size
), file=stream)
else:
print(u'files are identical', file=stream)
@click.command()
@click.argument('input_csv', type=click.Path(exists=True))
@click.option('--input', '-i', type=click.Path(exists=True),
help='Read the JSON patch from the given file.')
@click.option('--output', '-o', type=click.Path(),
help='Write the transformed CSV to the given file.')
@click.option('--strict/--no-strict', default=True,
help='Whether or not to tolerate a changed source document '
'(default: strict)')
def csvpatch_cmd(input_csv, input=None, output=None, strict=True):
"""
Apply the changes from a csvdiff patch to an existing CSV file.
"""
patch_stream = (sys.stdin
if input is None
else open(input))
tocsv_stream = (sys.stdout
if output is None
else open(output, 'w'))
fromcsv_stream = open(input_csv)
try:
patch_file(patch_stream, fromcsv_stream, tocsv_stream, strict=strict)
except patch.InvalidPatchError as e:
error.abort('reading patch, {0}'.format(e.args[0]))
finally:
patch_stream.close()
fromcsv_stream.close()
tocsv_stream.close()
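# Illustrative command-line usage (added for reference; assumes the package's
# console scripts are installed as ``csvdiff`` and ``csvpatch``, and the file
# names are hypothetical):
#
#   csvdiff id old.csv new.csv                  # compact JSON patch on stdout
#   csvdiff --style=summary id old.csv new.csv  # human-readable summary
#   csvpatch old.csv -i patch.json -o new.csv   # re-apply a saved patch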
|
bsd-3-clause
| 3,120,723,631,916,017,000
| 33.1
| 96
| 0.602698
| false
| 3.812612
| false
| false
| false
|
ResolveWang/algrithm_qa
|
arrandmatrix/q5.py
|
1
|
1037
|
"""
给定一个无序数组arr,求出需要排序的最短子数组长度。
例如:
arr=[1, 5, 3, 4, 2, 6, 7],返回4,因为只有[5, 3, 2, 4]需要排序。
"""
class ShortestSubarr:
@classmethod
def get_shortest_subarr(cls, arr):
if not arr or len(arr) == 1:
return 0
length = len(arr)
max_index = -1
index = length - 1
min_value = arr[index]
while index >= 0:
if arr[index] <= min_value:
min_value = arr[index]
else:
max_index = index
index -= 1
if max_index == -1:
return 0
min_index = -1
index = 0
max_value = arr[index]
while index < length:
if arr[index] >= max_value:
max_value = arr[index]
else:
min_index = index
index += 1
return min_index - max_index + 1
if __name__ == '__main__':
print(ShortestSubarr.get_shortest_subarr([1, 5, 3, 4, 2, 6, 7]))
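    # Additional illustrative checks (added; not part of the original file):
    print(ShortestSubarr.get_shortest_subarr([1, 2, 3, 4]))  # already sorted -> 0
    print(ShortestSubarr.get_shortest_subarr([2, 1]))        # whole array -> 2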
|
mit
| 4,220,207,234,826,846,000
| 22.512195
| 68
| 0.464174
| false
| 2.963077
| false
| false
| false
|
gongbudaizhe/bilib
|
demos/minion_interrogation/solution.py
|
1
|
1039
|
# This problem can be really computationally expensive if we simply traverse
# all the possible orderings (50! = 3.0414093e+64).
# Instead, we observe that if two adjacent minions (m[i], m[i+1]) in the
# ordering with properties (t[i], t[i+1], where t is the time the minion takes
# to complete the task) and (p[i], p[i+1], where p is the probability that the
# minion tells the true answer) satisfy the inequality
# t[i]/p[i] > t[i+1]/p[i+1], then we should swap the two minions to minimize
# the expected time cost. If t[i]/p[i] == t[i+1]/p[i+1], the order doesn't
# matter, and this is where lexicographical order should be used.
def compare(m1, m2):
r1 = m1[0] * m1[2] / float(m1[1])
r2 = m2[0] * m2[2] / float(m2[1])
if r1 == r2:
# lexicographical order
return m1[3] - m2[3]
else:
if r1 > r2:
return 1
else:
return -1
def answer(minions):
# add index
minions_aug = [val + [idx] for idx, val in enumerate(minions)]
return [m[3] for m in sorted(minions_aug, cmp=compare)]
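# Illustrative check (added; the input below is hypothetical and follows the
# field order implied by compare() above: [task_time, probability_numerator,
# probability_denominator]). Expected costs are 4/(1/2)=8, 2/(1/1)=2 and
# 13/(2/3)=19.5, so the minions are ordered by original index as [1, 0, 2].
if __name__ == '__main__':
    print(answer([[4, 1, 2], [2, 1, 1], [13, 2, 3]]))  # -> [1, 0, 2]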
|
mit
| -1,202,021,335,986,941,400
| 38.961538
| 80
| 0.628489
| false
| 2.838798
| false
| false
| false
|
yo-alan/personal
|
v/ui_editar.py
|
1
|
11697
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/alan/dev/personal/v/ui_editar.ui'
#
# Created: Sat Jan 31 18:27:20 2015
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Editar(object):
def setupUi(self, Editar):
Editar.setObjectName(_fromUtf8("Editar"))
Editar.resize(522, 324)
Editar.setModal(True)
self.verticalLayout = QtGui.QVBoxLayout(Editar)
self.verticalLayout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.groupBox = QtGui.QGroupBox(Editar)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.formLayout_3 = QtGui.QFormLayout(self.groupBox)
self.formLayout_3.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_3.setObjectName(_fromUtf8("formLayout_3"))
self.lblNombre = QtGui.QLabel(self.groupBox)
self.lblNombre.setObjectName(_fromUtf8("lblNombre"))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.LabelRole, self.lblNombre)
self.leNombre = QtGui.QLineEdit(self.groupBox)
self.leNombre.setObjectName(_fromUtf8("leNombre"))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.FieldRole, self.leNombre)
self.lblApellido = QtGui.QLabel(self.groupBox)
self.lblApellido.setObjectName(_fromUtf8("lblApellido"))
self.formLayout_3.setWidget(1, QtGui.QFormLayout.LabelRole, self.lblApellido)
self.leApellido = QtGui.QLineEdit(self.groupBox)
self.leApellido.setObjectName(_fromUtf8("leApellido"))
self.formLayout_3.setWidget(1, QtGui.QFormLayout.FieldRole, self.leApellido)
self.lblFechaNacimiento = QtGui.QLabel(self.groupBox)
self.lblFechaNacimiento.setObjectName(_fromUtf8("lblFechaNacimiento"))
self.formLayout_3.setWidget(2, QtGui.QFormLayout.LabelRole, self.lblFechaNacimiento)
self.deFechaNacimiento = QtGui.QDateEdit(self.groupBox)
self.deFechaNacimiento.setObjectName(_fromUtf8("deFechaNacimiento"))
self.formLayout_3.setWidget(2, QtGui.QFormLayout.FieldRole, self.deFechaNacimiento)
self.lblGenero = QtGui.QLabel(self.groupBox)
self.lblGenero.setObjectName(_fromUtf8("lblGenero"))
self.formLayout_3.setWidget(3, QtGui.QFormLayout.LabelRole, self.lblGenero)
self.cmbGenero = QtGui.QComboBox(self.groupBox)
self.cmbGenero.setObjectName(_fromUtf8("cmbGenero"))
self.cmbGenero.addItem(_fromUtf8(""))
self.cmbGenero.addItem(_fromUtf8(""))
self.formLayout_3.setWidget(3, QtGui.QFormLayout.FieldRole, self.cmbGenero)
self.lblCuil = QtGui.QLabel(self.groupBox)
self.lblCuil.setObjectName(_fromUtf8("lblCuil"))
self.formLayout_3.setWidget(4, QtGui.QFormLayout.LabelRole, self.lblCuil)
self.leCuil = QtGui.QLineEdit(self.groupBox)
self.leCuil.setMaxLength(13)
self.leCuil.setObjectName(_fromUtf8("leCuil"))
self.formLayout_3.setWidget(4, QtGui.QFormLayout.FieldRole, self.leCuil)
self.lblTelefono = QtGui.QLabel(self.groupBox)
self.lblTelefono.setObjectName(_fromUtf8("lblTelefono"))
self.formLayout_3.setWidget(5, QtGui.QFormLayout.LabelRole, self.lblTelefono)
self.leTelefono = QtGui.QLineEdit(self.groupBox)
self.leTelefono.setObjectName(_fromUtf8("leTelefono"))
self.formLayout_3.setWidget(5, QtGui.QFormLayout.FieldRole, self.leTelefono)
self.lblDomicilio = QtGui.QLabel(self.groupBox)
self.lblDomicilio.setObjectName(_fromUtf8("lblDomicilio"))
self.formLayout_3.setWidget(6, QtGui.QFormLayout.LabelRole, self.lblDomicilio)
self.leDomicilio = QtGui.QLineEdit(self.groupBox)
self.leDomicilio.setObjectName(_fromUtf8("leDomicilio"))
self.formLayout_3.setWidget(6, QtGui.QFormLayout.FieldRole, self.leDomicilio)
self.horizontalLayout.addWidget(self.groupBox)
self.groupBox_2 = QtGui.QGroupBox(Editar)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.formLayout_2 = QtGui.QFormLayout(self.groupBox_2)
self.formLayout_2.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.lblNroLegajo = QtGui.QLabel(self.groupBox_2)
self.lblNroLegajo.setObjectName(_fromUtf8("lblNroLegajo"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.lblNroLegajo)
self.sbNroLegajo = QtGui.QSpinBox(self.groupBox_2)
self.sbNroLegajo.setMinimum(1)
self.sbNroLegajo.setMaximum(1000)
self.sbNroLegajo.setObjectName(_fromUtf8("sbNroLegajo"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.sbNroLegajo)
self.lblFechaIngreso = QtGui.QLabel(self.groupBox_2)
self.lblFechaIngreso.setObjectName(_fromUtf8("lblFechaIngreso"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.lblFechaIngreso)
self.deFechaIngreso = QtGui.QDateEdit(self.groupBox_2)
self.deFechaIngreso.setObjectName(_fromUtf8("deFechaIngreso"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.deFechaIngreso)
self.lblRevista = QtGui.QLabel(self.groupBox_2)
self.lblRevista.setObjectName(_fromUtf8("lblRevista"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.LabelRole, self.lblRevista)
self.cmbRevista = QtGui.QComboBox(self.groupBox_2)
self.cmbRevista.setObjectName(_fromUtf8("cmbRevista"))
self.cmbRevista.addItem(_fromUtf8(""))
self.cmbRevista.addItem(_fromUtf8(""))
self.cmbRevista.addItem(_fromUtf8(""))
self.cmbRevista.addItem(_fromUtf8(""))
self.cmbRevista.addItem(_fromUtf8(""))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.FieldRole, self.cmbRevista)
self.lblCargo = QtGui.QLabel(self.groupBox_2)
self.lblCargo.setObjectName(_fromUtf8("lblCargo"))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.LabelRole, self.lblCargo)
self.cmbCargo = QtGui.QComboBox(self.groupBox_2)
self.cmbCargo.setObjectName(_fromUtf8("cmbCargo"))
self.cmbCargo.addItem(_fromUtf8(""))
self.cmbCargo.addItem(_fromUtf8(""))
self.cmbCargo.addItem(_fromUtf8(""))
self.cmbCargo.addItem(_fromUtf8(""))
self.cmbCargo.addItem(_fromUtf8(""))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.FieldRole, self.cmbCargo)
self.lblNivel = QtGui.QLabel(self.groupBox_2)
self.lblNivel.setObjectName(_fromUtf8("lblNivel"))
self.formLayout_2.setWidget(6, QtGui.QFormLayout.LabelRole, self.lblNivel)
self.leNivel = QtGui.QLineEdit(self.groupBox_2)
self.leNivel.setObjectName(_fromUtf8("leNivel"))
self.formLayout_2.setWidget(6, QtGui.QFormLayout.FieldRole, self.leNivel)
self.horizontalLayout.addWidget(self.groupBox_2)
self.verticalLayout.addLayout(self.horizontalLayout)
self.buttonBox = QtGui.QDialogButtonBox(Editar)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Editar)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Editar.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Editar.reject)
QtCore.QMetaObject.connectSlotsByName(Editar)
def retranslateUi(self, Editar):
Editar.setWindowTitle(QtGui.QApplication.translate("Editar", "Editar empleado", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("Editar", "Datos personales", None, QtGui.QApplication.UnicodeUTF8))
self.lblNombre.setText(QtGui.QApplication.translate("Editar", "Nombre:", None, QtGui.QApplication.UnicodeUTF8))
self.lblApellido.setText(QtGui.QApplication.translate("Editar", "Apellido:", None, QtGui.QApplication.UnicodeUTF8))
self.lblFechaNacimiento.setText(QtGui.QApplication.translate("Editar", "F. Nacimiento:", None, QtGui.QApplication.UnicodeUTF8))
self.deFechaNacimiento.setDisplayFormat(QtGui.QApplication.translate("Editar", "dd/MM/yyyy", None, QtGui.QApplication.UnicodeUTF8))
self.lblGenero.setText(QtGui.QApplication.translate("Editar", "Género:", None, QtGui.QApplication.UnicodeUTF8))
self.cmbGenero.setItemText(0, QtGui.QApplication.translate("Editar", "Femenino", None, QtGui.QApplication.UnicodeUTF8))
self.cmbGenero.setItemText(1, QtGui.QApplication.translate("Editar", "Masculino", None, QtGui.QApplication.UnicodeUTF8))
self.lblCuil.setText(QtGui.QApplication.translate("Editar", "Cuil:", None, QtGui.QApplication.UnicodeUTF8))
self.lblTelefono.setText(QtGui.QApplication.translate("Editar", "Teléfono:", None, QtGui.QApplication.UnicodeUTF8))
self.lblDomicilio.setText(QtGui.QApplication.translate("Editar", "Domicilio:", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_2.setTitle(QtGui.QApplication.translate("Editar", "Datos laborales", None, QtGui.QApplication.UnicodeUTF8))
self.lblNroLegajo.setText(QtGui.QApplication.translate("Editar", "Nro. Legajo:", None, QtGui.QApplication.UnicodeUTF8))
self.lblFechaIngreso.setText(QtGui.QApplication.translate("Editar", "Ingreso:", None, QtGui.QApplication.UnicodeUTF8))
self.deFechaIngreso.setDisplayFormat(QtGui.QApplication.translate("Editar", "dd/MM/yyyy", None, QtGui.QApplication.UnicodeUTF8))
self.lblRevista.setText(QtGui.QApplication.translate("Editar", "Sit. de Revista:", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(0, QtGui.QApplication.translate("Editar", "Comisión", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(1, QtGui.QApplication.translate("Editar", "Pasantía", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(2, QtGui.QApplication.translate("Editar", "Permanente", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(3, QtGui.QApplication.translate("Editar", "Temporaria", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(4, QtGui.QApplication.translate("Editar", "Transitoria", None, QtGui.QApplication.UnicodeUTF8))
self.lblCargo.setText(QtGui.QApplication.translate("Editar", "Cargo:", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(0, QtGui.QApplication.translate("Editar", "Administrativo", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(1, QtGui.QApplication.translate("Editar", "Jerárquico", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(2, QtGui.QApplication.translate("Editar", "Obrero", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(3, QtGui.QApplication.translate("Editar", "Profesional", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(4, QtGui.QApplication.translate("Editar", "Servicio", None, QtGui.QApplication.UnicodeUTF8))
self.lblNivel.setText(QtGui.QApplication.translate("Editar", "Nivel:", None, QtGui.QApplication.UnicodeUTF8))
|
mit
| 4,544,980,707,808,152,000
| 68.595238
| 139
| 0.729644
| false
| 3.384081
| false
| false
| false
|
soasme/wikisensei
|
wikisensei/wiki/migrations/0001_initial.py
|
1
|
1769
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-12 08:39
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Version',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.IntegerField(default=0)),
('content', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['-version'],
},
),
migrations.CreateModel(
name='Wiki',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30)),
('version', models.IntegerField(default=0)),
('privacy', models.IntegerField(default=0)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='version',
name='wiki',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='versions', to='wiki.Wiki'),
),
]
|
gpl-3.0
| -5,097,590,489,198,971,000
| 35.854167
| 122
| 0.568683
| false
| 4.411471
| false
| false
| false
|
tchellomello/home-assistant
|
tests/components/homematicip_cloud/test_device.py
|
1
|
8283
|
"""Common tests for HomematicIP devices."""
from homematicip.base.enums import EventType
from homeassistant.components.homematicip_cloud import DOMAIN as HMIPC_DOMAIN
from homeassistant.components.homematicip_cloud.hap import HomematicipHAP
from homeassistant.const import STATE_ON, STATE_UNAVAILABLE
from homeassistant.helpers import device_registry as dr, entity_registry as er
from .helper import (
HAPID,
HomeFactory,
async_manipulate_test_data,
get_and_check_entity_basics,
)
from tests.async_mock import patch
async def test_hmip_load_all_supported_devices(hass, default_mock_hap_factory):
"""Ensure that all supported devices could be loaded."""
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=None, test_groups=None
)
assert len(mock_hap.hmip_device_by_entity_id) == 191
async def test_hmip_remove_device(hass, default_mock_hap_factory):
"""Test Remove of hmip device."""
entity_id = "light.treppe_ch"
entity_name = "Treppe CH"
device_model = "HmIP-BSL"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Treppe"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert hmip_device
device_registry = await dr.async_get_registry(hass)
entity_registry = await er.async_get_registry(hass)
pre_device_count = len(device_registry.devices)
pre_entity_count = len(entity_registry.entities)
pre_mapping_count = len(mock_hap.hmip_device_by_entity_id)
hmip_device.fire_remove_event()
await hass.async_block_till_done()
assert len(device_registry.devices) == pre_device_count - 1
assert len(entity_registry.entities) == pre_entity_count - 3
assert len(mock_hap.hmip_device_by_entity_id) == pre_mapping_count - 3
async def test_hmip_add_device(hass, default_mock_hap_factory, hmip_config_entry):
"""Test Remove of hmip device."""
entity_id = "light.treppe_ch"
entity_name = "Treppe CH"
device_model = "HmIP-BSL"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Treppe"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert hmip_device
device_registry = await dr.async_get_registry(hass)
entity_registry = await er.async_get_registry(hass)
pre_device_count = len(device_registry.devices)
pre_entity_count = len(entity_registry.entities)
pre_mapping_count = len(mock_hap.hmip_device_by_entity_id)
hmip_device.fire_remove_event()
await hass.async_block_till_done()
assert len(device_registry.devices) == pre_device_count - 1
assert len(entity_registry.entities) == pre_entity_count - 3
assert len(mock_hap.hmip_device_by_entity_id) == pre_mapping_count - 3
reloaded_hap = HomematicipHAP(hass, hmip_config_entry)
with patch(
"homeassistant.components.homematicip_cloud.HomematicipHAP",
return_value=reloaded_hap,
), patch.object(reloaded_hap, "async_connect"), patch.object(
reloaded_hap, "get_hap", return_value=mock_hap.home
), patch(
"homeassistant.components.homematicip_cloud.hap.asyncio.sleep"
):
mock_hap.home.fire_create_event(event_type=EventType.DEVICE_ADDED)
await hass.async_block_till_done()
assert len(device_registry.devices) == pre_device_count
assert len(entity_registry.entities) == pre_entity_count
new_hap = hass.data[HMIPC_DOMAIN][HAPID]
assert len(new_hap.hmip_device_by_entity_id) == pre_mapping_count
async def test_hmip_remove_group(hass, default_mock_hap_factory):
"""Test Remove of hmip group."""
entity_id = "switch.strom_group"
entity_name = "Strom Group"
device_model = None
mock_hap = await default_mock_hap_factory.async_get_mock_hap(test_groups=["Strom"])
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert hmip_device
device_registry = await dr.async_get_registry(hass)
entity_registry = await er.async_get_registry(hass)
pre_device_count = len(device_registry.devices)
pre_entity_count = len(entity_registry.entities)
pre_mapping_count = len(mock_hap.hmip_device_by_entity_id)
hmip_device.fire_remove_event()
await hass.async_block_till_done()
assert len(device_registry.devices) == pre_device_count
assert len(entity_registry.entities) == pre_entity_count - 1
assert len(mock_hap.hmip_device_by_entity_id) == pre_mapping_count - 1
async def test_all_devices_unavailable_when_hap_not_connected(
hass, default_mock_hap_factory
):
"""Test make all devices unavaulable when hap is not connected."""
entity_id = "light.treppe_ch"
entity_name = "Treppe CH"
device_model = "HmIP-BSL"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Treppe"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert hmip_device
assert mock_hap.home.connected
await async_manipulate_test_data(hass, mock_hap.home, "connected", False)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_UNAVAILABLE
async def test_hap_reconnected(hass, default_mock_hap_factory):
"""Test reconnect hap."""
entity_id = "light.treppe_ch"
entity_name = "Treppe CH"
device_model = "HmIP-BSL"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=["Treppe"]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
assert hmip_device
assert mock_hap.home.connected
await async_manipulate_test_data(hass, mock_hap.home, "connected", False)
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_UNAVAILABLE
mock_hap._accesspoint_connected = False # pylint: disable=protected-access
await async_manipulate_test_data(hass, mock_hap.home, "connected", True)
await hass.async_block_till_done()
ha_state = hass.states.get(entity_id)
assert ha_state.state == STATE_ON
async def test_hap_with_name(hass, mock_connection, hmip_config_entry):
"""Test hap with name."""
home_name = "TestName"
entity_id = f"light.{home_name.lower()}_treppe_ch"
entity_name = f"{home_name} Treppe CH"
device_model = "HmIP-BSL"
hmip_config_entry.data = {**hmip_config_entry.data, "name": home_name}
mock_hap = await HomeFactory(
hass, mock_connection, hmip_config_entry
).async_get_mock_hap(test_devices=["Treppe"])
assert mock_hap
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert hmip_device
assert ha_state.state == STATE_ON
assert ha_state.attributes["friendly_name"] == entity_name
async def test_hmip_reset_energy_counter_services(hass, default_mock_hap_factory):
"""Test reset_energy_counter service."""
entity_id = "switch.pc"
entity_name = "Pc"
device_model = "HMIP-PSM"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
hass, mock_hap, entity_id, entity_name, device_model
)
assert ha_state
await hass.services.async_call(
"homematicip_cloud",
"reset_energy_counter",
{"entity_id": "switch.pc"},
blocking=True,
)
assert hmip_device.mock_calls[-1][0] == "reset_energy_counter"
assert len(hmip_device._connection.mock_calls) == 2 # pylint: disable=W0212
await hass.services.async_call(
"homematicip_cloud", "reset_energy_counter", {"entity_id": "all"}, blocking=True
)
assert hmip_device.mock_calls[-1][0] == "reset_energy_counter"
assert len(hmip_device._connection.mock_calls) == 4 # pylint: disable=W0212
|
apache-2.0
| -8,402,342,774,571,276,000
| 33.227273
| 88
| 0.683569
| false
| 3.154227
| true
| false
| false
|
yeming233/rally
|
rally/plugins/openstack/scenarios/cinder/volume_backups.py
|
1
|
2728
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils
from rally.task import validation
"""Scenarios for Cinder Volume Backup."""
@validation.add("number", param_name="size", minval=1, integer_only=True)
@validation.add("restricted_parameters", param_names=["name", "display_name"],
subdict="create_volume_kwargs")
@validation.add("restricted_parameters", param_names="name",
subdict="create_backup_kwargs")
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_cinder_services", services="cinder-backup")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup": ["cinder"]},
name="CinderVolumeBackups."
"create_incremental_volume_backup", platform="openstack")
class CreateIncrementalVolumeBackup(cinder_utils.CinderBasic):
def run(self, size, do_delete=True, create_volume_kwargs=None,
create_backup_kwargs=None):
"""Create a incremental volume backup.
The scenario first create a volume, the create a backup, the backup
is full backup. Because Incremental backup must be based on the
full backup. finally create a incremental backup.
:param size: volume size in GB
:param do_delete: deletes backup and volume after creating if True
:param create_volume_kwargs: optional args to create a volume
:param create_backup_kwargs: optional args to create a volume backup
"""
create_volume_kwargs = create_volume_kwargs or {}
create_backup_kwargs = create_backup_kwargs or {}
volume = self.cinder.create_volume(size, **create_volume_kwargs)
backup1 = self.cinder.create_backup(volume.id, **create_backup_kwargs)
backup2 = self.cinder.create_backup(volume.id, incremental=True)
if do_delete:
self.cinder.delete_backup(backup2)
self.cinder.delete_backup(backup1)
self.cinder.delete_volume(volume)
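# Illustrative rally task snippet exercising this scenario (added for
# reference; the runner values are hypothetical, the scenario name matches the
# @scenario.configure decorator above):
#
# {
#     "CinderVolumeBackups.create_incremental_volume_backup": [
#         {
#             "args": {"size": 1, "do_delete": true},
#             "runner": {"type": "constant", "times": 2, "concurrency": 2}
#         }
#     ]
# }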
|
apache-2.0
| -3,003,851,158,195,206,000
| 44.466667
| 78
| 0.70088
| false
| 4.158537
| false
| false
| false
|
MapQuest/mapquest-osm-server
|
src/python/frontend/maphandler.py
|
1
|
7622
|
# Copyright (c) 2011 AOL Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
## Support retrieval of the map data in a bounding box.
import geohash
import tornado.web
from lxml import etree as ET
import apiserver.const as C
from apiserver.osmelement import encode_coordinate, new_osm_response
from util import filter_references, response_to_xml
def _filter_in_bbox(bbox, geodocs):
"Return the list of nodes that fall into the given bounding box."
w,s,e,n = map(encode_coordinate, bbox)
nodeset = set()
for gd in geodocs:
for (nid, lat, lon) in gd.get_node_info():
if w <= lon < e and s <= lat < n:
nodeset.add(nid)
return nodeset
class MapHandler(tornado.web.RequestHandler):
"Handle requests for the /map API."
def initialize(self, cfg, datastore):
self.datastore = datastore
self.precision = cfg.getint(C.DATASTORE, C.GEOHASH_LENGTH)
def get(self, *args, **kwargs):
'''Service a GET request to the '/map' URI.
The 'bbox' parameter contains 4 coordinates "l" (w), "b" (s),
"r" (e) and "t" (n).'''
# Sanity check the input.
bbox_arg = self.get_argument('bbox', None)
if not bbox_arg:
raise tornado.web.HTTPError(400) # Bad Syntax
bbox = bbox_arg.split(',')
if len(bbox) != 4:
raise tornado.web.HTTPError(400)
try:
w,s,e,n = map(float, bbox)
except ValueError:
raise tornado.web.HTTPError(400)
# Check the "l,b,r,t" coordinates passed in for sanity.
if w < C.LON_MIN or w > C.LON_MAX or \
e < C.LON_MIN or e > C.LON_MAX or \
s < C.LAT_MIN or s > C.LAT_MAX or \
n < C.LAT_MIN or n > C.LAT_MAX or \
n < s or e < w:
raise tornado.web.HTTPError(400)
nodelist, ways, relations = self.handle_map(bbox)
response = self.build_bbox_response(nodelist, ways, relations, bbox)
self.set_header(C.CONTENT_TYPE, C.TEXT_XML)
self.write(response_to_xml(response))
def build_bbox_response(self, nodes, ways, relations, bbox):
"""Build an OSM response for the query."""
# Create a new response element.
osm = new_osm_response()
# Add a <bounds> element.
bb = ET.SubElement(osm, C.BOUNDS)
(bb.attrib[C.MINLON], bb.attrib[C.MINLAT],
bb.attrib[C.MAXLON], bb.attrib[C.MAXLAT]) = map(str, bbox)
# Add nodes, ways and relations in that order.
for n in nodes:
n.build_response(osm)
for w in ways:
w.build_response(osm)
for r in relations:
r.build_response(osm)
return osm
def handle_map(self, bbox):
"""Implementation of the /map API.
Parameters:
bbox -- Bounding box coordinates.
"""
nodelist = []
relations = []
ways = []
# This implementation follows the current implementation of
# the API server at api.openstreetmap.org (the 'rails' port).
# Look up the geo coded documents covering the desired bbox.
gckeys = self.get_geocodes(bbox)
geodocs = self.datastore.fetch_keys(C.GEODOC, gckeys)
# Step 1: Get the list of nodes contained in the given
# bounding box.
nodeset = _filter_in_bbox(bbox,
[gd for (st, gd) in geodocs if st])
if len(nodeset) == 0:
return (nodelist, ways, relations)
nodelist = [z for (st, z) in self.datastore.fetch_keys(
C.NODE, [n for n in nodeset]) if st]
# Step 2: Retrieve all ways that reference at least one node
# in the given bounding box.
wayset = filter_references(C.WAY, nodelist)
# Step 3: Retrieve any additional nodes referenced by the ways
# retrieved.
waynodeset = set()
for (st,w) in self.datastore.fetch_keys(C.WAY, [w for w in wayset]):
if st:
ways.append(w)
waynodeset.update(w.get_node_ids())
extranodeset = waynodeset - nodeset
nodelist.extend([n for (st,n) in
self.datastore.fetch_keys(C.NODE,
[n for n in extranodeset])
if st])
nodeset = nodeset | extranodeset
# Step 4: Retrieve the relations associated with these nodes.
# ... all relations that reference nodes being returned.
relset = filter_references(C.RELATION, nodelist)
# ... and relations that reference one of the ways in the wayset.
relset.update(filter_references(C.RELATION, ways))
# ... retrieve relations from the data store.
relations = [xr for (st,xr) in
self.datastore.fetch_keys(C.RELATION,
[r for r in relset])
if st]
# ... and relations referenced by existing relations
# (one-pass only).
extrarelset = filter_references(C.RELATION, relations)
newrelset = extrarelset - relset
newrels = [nr for (st, nr) in
self.datastore.fetch_keys(C.RELATION,
[r for r in newrelset])
if st]
relations.extend(newrels)
return (nodelist, ways, relations)
def get_geocodes(self, bbox):
"""Return a list of keys covering a given area.
Parameters:
bbox -- Bounding box of the desired region.
"""
# TODO: Make this more efficient for sparse areas of the map.
w, s, e, n = map(float, bbox)
n = min(C.MAXGHLAT, n) # work around a geohash library
s = min(C.MAXGHLAT, s) # limitation
assert(w <= e and s <= n)
gcset = set()
gc = geohash.encode(s, w, self.precision)
bl = geohash.bbox(gc) # Box containing point (s,w).
s_ = bl['s'];
while s_ < n: # Step south to north.
w_ = bl['w']
gc = geohash.encode(s_, w_, self.precision)
bb_sn = geohash.bbox(gc) # bounding box in S->N direction
while w_ < e: # Step west to east.
gcset.add(gc)
bb_we = geohash.bbox(gc) # in W->E direction
w_ = bb_we['e']
gc = geohash.encode(s_, w_, self.precision)
s_ = bb_sn['n']
assert(len(gcset) > 0)
return [gc for gc in gcset]
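# Illustrative use of the geohash primitives relied on above (added for
# clarity; the coordinates are hypothetical):
#
#   >>> import geohash
#   >>> cell = geohash.encode(51.5, -0.12, 5)   # (lat, lon, precision)
#   >>> box = geohash.bbox(cell)                # dict with 'n', 's', 'e', 'w'
#   >>> box['s'] <= 51.5 < box['n'] and box['w'] <= -0.12 < box['e']
#   True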
|
mit
| 5,400,930,514,382,168,000
| 32.875556
| 77
| 0.581737
| false
| 3.865112
| false
| false
| false
|
matus-stehlik/glowing-batman
|
problems/migrations/0005_auto__add_field_usersolution_score.py
|
1
|
16422
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserSolution.score'
db.add_column(u'problems_usersolution', 'score',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding M2M table for field corrected_by on 'UserSolution'
m2m_table_name = db.shorten_name(u'problems_usersolution_corrected_by')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('usersolution', models.ForeignKey(orm[u'problems.usersolution'], null=False)),
('user', models.ForeignKey(orm[u'auth.user'], null=False))
))
db.create_unique(m2m_table_name, ['usersolution_id', 'user_id'])
def backwards(self, orm):
# Deleting field 'UserSolution.score'
db.delete_column(u'problems_usersolution', 'score')
# Removing M2M table for field corrected_by on 'UserSolution'
db.delete_table(db.shorten_name(u'problems_usersolution_corrected_by'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'competitions.competition': {
'Meta': {'ordering': "['name']", 'object_name': 'Competition'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'competitions_created'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'competitions_modified'", 'null': 'True', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'organizer_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'events.event': {
'Meta': {'ordering': "['-start_time', 'end_time']", 'object_name': 'Event'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events_created'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events_modified'", 'null': 'True', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'registered_org': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'organized_event_set'", 'symmetrical': 'False', 'through': u"orm['events.EventOrgRegistration']", 'to': u"orm['auth.User']"}),
'registered_user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'through': u"orm['events.EventUserRegistration']", 'symmetrical': 'False'}),
'registration_end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {})
},
u'events.eventorgregistration': {
'Meta': {'ordering': "(u'_order',)", 'unique_together': "(('event', 'organizer'),)", 'object_name': 'EventOrgRegistration'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'organizer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'events.eventuserregistration': {
'Meta': {'ordering': "(u'_order',)", 'unique_together': "(('event', 'user'),)", 'object_name': 'EventUserRegistration'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'leaflets.leaflet': {
'Meta': {'ordering': "['competition', '-year', 'issue']", 'unique_together': "(('competition', 'year', 'issue'),)", 'object_name': 'Leaflet'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['competitions.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.IntegerField', [], {}),
'leaflet': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'problems.orgsolution': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'OrgSolution'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'organizer solutions_created'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'organizer solutions_modified'", 'null': 'True', 'to': u"orm['auth.User']"}),
'organizer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['problems.Problem']"})
},
u'problems.problem': {
'Meta': {'object_name': 'Problem'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'problems_created'", 'null': 'True', 'to': u"orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['problems.ProblemCategory']"}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['competitions.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'problems_modified'", 'null': 'True', 'to': u"orm['auth.User']"}),
'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'rating_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'severity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['problems.ProblemSeverity']"}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'problems.problemcategory': {
'Meta': {'ordering': "['name']", 'object_name': 'ProblemCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'problems.probleminset': {
'Meta': {'ordering': "['position']", 'unique_together': "(['problem', 'problemset'],)", 'object_name': 'ProblemInSet'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['problems.Problem']"}),
'problemset': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['problems.ProblemSet']"})
},
u'problems.problemset': {
'Meta': {'object_name': 'ProblemSet'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sets_created'", 'null': 'True', 'to': u"orm['auth.User']"}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['competitions.Competition']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Event']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'leaflet': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['leaflets.Leaflet']", 'null': 'True', 'blank': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sets_modified'", 'null': 'True', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'problems': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['problems.Problem']", 'through': u"orm['problems.ProblemInSet']", 'symmetrical': 'False'})
},
u'problems.problemseverity': {
'Meta': {'ordering': "['level']", 'object_name': 'ProblemSeverity'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'problems.usersolution': {
'Meta': {'ordering': "(u'_order',)", 'unique_together': "(['user', 'problem'],)", 'object_name': 'UserSolution'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user solutions_created'", 'null': 'True', 'to': u"orm['auth.User']"}),
'corrected_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'usersolutions_corrected_set'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user solutions_modified'", 'null': 'True', 'to': u"orm['auth.User']"}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['problems.Problem']"}),
'score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'solution': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['problems']
|
mit
| 1,788,266,054,158,258,400
| 82.790816
| 230
| 0.563512
| false
| 3.618775
| false
| false
| false
|