repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
alope107/nbgrader | nbgrader/preprocessors/computechecksums.py | Python | bsd-3-clause | 771 | 0.002594 | from nbgrader import utils
from nbgrader.preprocessors import NbGraderPreprocessor
class ComputeChecksums(NbGraderPreprocessor):
"""A preprocessor to compute checksums of grade cells."""
def preprocess_cell(self, cell, resources, cell_in | dex):
# compute checksums of grade cell and solution cells
if utils.is_grade(cell) or utils.is_solution(cell) or utils.is_locked(cell):
checksum = | utils.compute_checksum(cell)
cell.metadata.nbgrader['checksum'] = checksum
if utils.is_grade(cell) or utils.is_solution(cell):
self.log.debug(
"Checksum for '%s' is %s",
cell.metadata.nbgrader['grade_id'],
checksum)
return cell, resources
|
capoe/espressopp.soap | src/analysis/Test.py | Python | gpl-3.0 | 1,624 | 0.012315 | # Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
****************************
**espressopp.analysis.Test**
****************************
.. function:: espressopp.analysis.Test(system)
:param system:
:type system:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.AnalysisBase i | mport *
from _espressopp import analysis_Test
class TestLocal(AnalysisBaseLocal, analysis_Test):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_Test, system)
if pmi.isController :
class Test(AnalysisBase) | :
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.analysis.TestLocal'
)
|
nextgis-extra/tests | lib_gdal/ogr/ogr_sxf.py | Python | gpl-2.0 | 2,553 | 0.005875 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id: ogr_sxf.py 26513 2013-10-02 11:59:50Z bishop $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test OGR SXF driver functionality.
# Author: Dmitry Baryshnikov <polimax@mail.ru>
#
###############################################################################
# Copyright (c) 2013, NextGIS
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
###############################################################################
import sys
sys.path.append( '../pymod' )
import gdaltest
from osgeo import ogr
###############################################################################
# Open SXF datasource.
def ogr_sxf_1():
gdaltest.sxf_ds = None
with gdaltest.error_handler():
# Expect Warning 0 and Warning 6.
gdaltest.sxf_ds = ogr.Open( 'data/100_test.sxf' )
if gdaltest.sxf_ds is not None:
return 'success'
else:
return 'fail'
###############################################################################
# Run test_ogrsf
def ogr_sxf_2():
import test_cli_utilities
if test_cli_utilities.get_test_ogrsf_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path | () + ' data/100_test.sxf')
if ret.find('INFO') == -1 or ret.find('ERROR') != -1:
print(ret)
return 'fail'
return 'success'
###############################################################################
#
def ogr_sxf_cleanup():
if gdaltest.sxf_ds is None:
return 'skip'
gd | altest.sxf_ds = None
return 'success'
gdaltest_list = [
ogr_sxf_1,
ogr_sxf_2,
ogr_sxf_cleanup ]
if __name__ == '__main__':
gdaltest.setup_run( 'ogr_sxf' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
|
ct-23/home-assistant | tests/components/cover/test_template.py | Python | apache-2.0 | 26,113 | 0 | """The tests the cover command line platform."""
import logging
import unittest
from homeassistant.core import callback
from homeassistant import setup
import homeassistant.components.cover as cover
from homeassistant.const import STATE_OPEN, STATE_CLOSED
from tests.common import (
get_test_home_assistant, assert_setup_component)
_LOGGER = logging.getLogger(__name__)
class TestTemplateCover(unittest.TestCase):
"""Test the cover command line platform."""
hass = None
calls = None
# pylint: disable=invalid-name
def setup_method(self, method):
"""Initialize services when tests are started."""
self.hass = get_test_home_assistant()
self.calls = []
@callback
def record_call(service):
"""Track function calls.."""
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_template_state_text(self):
"""Test the state text of a template."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'value_template':
"{{ states.cover.test_state.state }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.set('cover.test_state', STATE_OPEN)
self.hass. | block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.state == STATE_OPEN
state = self.hass.states.set('cover.test_state', STATE_CLOSED)
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.state == STATE_CLOSED
def test_template_state_boolean(self | ):
"""Test the value_template attribute."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'value_template':
"{{ 1 == 1 }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.state == STATE_OPEN
def test_template_position(self):
"""Test the position_template attribute."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'position_template':
"{{ states.cover.test.attributes.position }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.set('cover.test', STATE_CLOSED)
self.hass.block_till_done()
entity = self.hass.states.get('cover.test')
attrs = dict()
attrs['position'] = 42
self.hass.states.async_set(
entity.entity_id, entity.state,
attributes=attrs)
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_position') == 42.0
assert state.state == STATE_OPEN
state = self.hass.states.set('cover.test', STATE_OPEN)
self.hass.block_till_done()
entity = self.hass.states.get('cover.test')
attrs['position'] = 0.0
self.hass.states.async_set(
entity.entity_id, entity.state,
attributes=attrs)
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_position') == 0.0
assert state.state == STATE_CLOSED
def test_template_tilt(self):
"""Test the tilt_template attribute."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'value_template':
"{{ 1 == 1 }}",
'tilt_template':
"{{ 42 }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_tilt_position') == 42.0
def test_template_out_of_bounds(self):
"""Test template out-of-bounds condition."""
with assert_setup_component(1, 'cover'):
assert setup.setup_component(self.hass, 'cover', {
'cover': {
'platform': 'template',
'covers': {
'test_template_cover': {
'position_template':
"{{ -1 }}",
'tilt_template':
"{{ 110 }}",
'open_cover': {
'service': 'cover.open_cover',
'entity_id': 'cover.test_state'
},
'close_cover': {
'service': 'cover.close_cover',
'entity_id': 'cover.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('cover.test_template_cover')
assert state.attributes.get('current_tilt_position') is None
assert state.attributes.get('current_position') is None
def test_template_mutex(self) |
Small-Star/PDV3 | app/views.py | Python | gpl-3.0 | 5,584 | 0.008059 | from flask import render_template
from app import app, db
from app.models import Mood, QS_Params, Lifts
import pandas as pd
import graph_mood, graph_diet, graph_body, graph_weightlifting, graph_meditation, analysis
@app.route("/")
@app.route("/index")
def index():
title = "Index"
return render_template("index.html",title=title)
@app.route("/mood")
def mood():
q = Mood.query.filter(Mood.date != None)
data = pd.read_sql(q.statement, q.session.bind)
script, div_days, div_avg_a, div_avg_v, div_good_days, div_poor_days, div_caution_days, div_warning_days, plot_ts_div, plot_vr_div, ma_slider_div = graph_mood.mood_graph(data)
return render_template("mood.html",data=data, script=script, div_days=div_days, div_avg_a=div_avg_a, div_avg_v=div_avg_v, div_good_days=div_good_days, div_poor_days=div_poor_days, div_caution_days=div_caution_days, div_warning_days=div_warning_days, plot_ts_div=plot_ts_div, plot_vr_div=plot_vr_div, ma_slider_div=ma_slider_div, title="MOOD")
@app.route("/body")
def body():
q = QS_Params.query.filter(QS_Params.kcal_intake >= 0) #Should include all the days
data = pd.read_sql(q.statement, q.session.bind)
script, div_days, div_avg_bg, div_avg_rhr, div_avg_slp_dur, div_avg_slp_q, div_days_bc, div_avg_wt, div_avg_bf, plot_blood_div, plot_rhr_div, plot_osq_div, plot_body_comp_div, plot_sleep_div, ma_slider_div = graph_body.body_graph(data)
return render_template("body.html", data=data, script=script, div_days=div_days, div_avg_bg=div_avg_bg, div_avg_rhr=div_avg_rhr, div_avg_slp_dur=div_avg_slp_ | dur, div_avg_slp_q=div_avg_slp_q, div_days_b | c=div_days_bc, div_avg_wt=div_avg_wt, div_avg_bf=div_avg_bf, plot_blood_div=plot_blood_div, plot_rhr_div=plot_rhr_div, plot_osq_div=plot_osq_div, plot_body_comp_div=plot_body_comp_div, plot_sleep_div=plot_sleep_div, ma_slider_div=ma_slider_div, title="BODY")
@app.route("/diet")
def diet():
q = QS_Params.query.filter(QS_Params.kcal_intake >= 0)
data = pd.read_sql(q.statement, q.session.bind)
script, div_days, div_avg_intake, div_tdee, div_avg_net, div_avg_protein, div_avg_fat, div_avg_carb_all, div_avg_carb_net, div_avg_carb_fiber, div_problem_days, div_volatility, plot_comparison_div, plot_composition_div, ma_slider_div = graph_diet.diet_graph(data)
return render_template("diet.html",data=data, script=script, div_days=div_days, div_avg_intake=div_avg_intake, div_tdee=div_tdee, div_avg_net=div_avg_net, div_avg_protein=div_avg_protein, div_avg_fat=div_avg_fat, div_avg_carb_all=div_avg_carb_all, div_avg_carb_net=div_avg_carb_net, div_avg_carb_fiber=div_avg_carb_fiber, div_problem_days=div_problem_days, div_volatility=div_volatility, plot_composition_div=plot_composition_div, plot_comparison_div=plot_comparison_div, ma_slider_div=ma_slider_div, title="DIET")
@app.route("/weightlifting")
def weightlifting():
q = Lifts.query.filter(Lifts.date != None)
data = pd.read_sql(q.statement, q.session.bind)
script, div_stairs, div_num_ohp, div_workouts, div_wilks, div_squat_max, div_deadlift_max, div_bench_max, div_ohp_max, div_squat_max_vol_per_set, div_deadlift_max_vol_per_set, div_bench_max_vol_per_set, div_ohp_max_vol_per_set, div_squat_total_vol, div_deadlift_total_vol, div_bench_total_vol, div_ohp_total_vol, plot_max_div, plot_mvps_div, plot_tv_div, ma_slider_div = graph_weightlifting.weightlifting_graph(data)
return render_template("weightlifting.html",data=data, script=script, div_stairs=div_stairs, div_num_ohp=div_num_ohp, div_workouts=div_workouts, div_wilks=div_wilks, div_squat_max=div_squat_max, div_deadlift_max=div_deadlift_max, div_bench_max=div_bench_max, div_ohp_max=div_ohp_max, div_squat_max_vol_per_set=div_squat_max_vol_per_set, div_deadlift_max_vol_per_set=div_deadlift_max_vol_per_set, div_bench_max_vol_per_set=div_bench_max_vol_per_set, div_ohp_max_vol_per_set=div_ohp_max_vol_per_set, div_squat_total_vol=div_squat_total_vol, div_deadlift_total_vol=div_deadlift_total_vol, div_bench_total_vol=div_bench_total_vol, div_ohp_total_vol=div_ohp_total_vol, plot_max_div=plot_max_div, plot_mvps_div=plot_mvps_div, plot_tv_div=plot_tv_div, ma_slider_div=ma_slider_div, title="WEIGHTLIFTING")
@app.route("/meditation")
def meditation():
q = QS_Params.query.filter(QS_Params.meditation_time >= 0).order_by(QS_Params.date)
data = pd.read_sql(q.statement, q.session.bind)
script, plot_daily_div, plot_cumu_div = graph_meditation.meditation_graph(data)
return render_template("meditation.html",data=data, script=script, plot_daily_div=plot_daily_div, plot_cumu_div=plot_cumu_div, title="MEDITATION")
@app.route("/books")
def books():
title = "BOOKS"
return render_template("books.html",title=title)
@app.route("/goals")
def goals():
title = "GOAL TRACKING"
return render_template("goals.html",title=title)
@app.route("/finances")
def finances():
title = "finances"
return render_template("finances.html",title=title)
@app.route("/dayviewer")
def dayviewer():
title = "Dayviewer"
return render_template("dayviewer.html",title=title)
@app.route("/analysis_vbe")
def analysis_vbe():
title = "Regression Models: VBE"
return render_template("analysis_vbe.html",title=title)
@app.route("/settings")
def settings():
title = "Settings"
return render_template("settings.html",title=title)
@app.route("/update_db")
def update_db():
title = "Update DB"
return render_template("update_db.html",title=title)
@app.route("/rebuild_db")
def rebuild_db():
title = "Rebuild DB"
return render_template("rebuild_db.html",title=title)
|
veveykocute/Spl | splc.py | Python | unlicense | 19,239 | 0.007745 | import sys
import math
"""A Shakespeare Compiler written in Python, splc.py
This is a compiler that implements the majority of the Shakespeare programming language
invented by Kalle Hasselstrom and Jon Aslund, I take no credit for inventing the language.
This software is free to edit or use, and though I doubt anyone would use this for many projects,
I guess I would appreciate some degree of acknowledgment if you do.
(c) V1.2 Sam Donow 2013-2014
sad3@williams.edu
drsam94@gmail.com"""
#missing features
#full support for multi-word nouns/names
#Stacks, who needs them?
pos_adj = []
neg_adj = []
pos_comp = []
neg_comp = []
pos_nouns = []
neg_nouns = []
valid_names= []
zero_nouns = ['nothing', 'zero']
src = ""
N = 0
vartable = set([])
speaker = ""
target = ""
stage = set([])
actnum = 0
act_names = {}
scene_names= []
#report a compile-time error, then exit
def Assert(b, s):
global N
if not b:
sys.stderr.write(s + " at line " + str(N) + "\n")
sys.exit(1)
#Abstraction for writing to file, eased python 2/3 agnosticity,
#and will eventually allow file output instead of stdout if that
#ever is desired
def writeToFile(s):
sys.stdout.write(str(s) + "\n")
def isNoun(word):
return word in pos_nouns or word in neg_nouns or word in zero_nouns
def isAdjective(word):
return word in pos_adj or word in neg_adj
def isComparative(word):
return word in pos_comp or word in neg_comp
#returns 1 for "nice" and neutral nouns, -1 for nasty ones
def nounValue(word):
Assert(isNoun(word), "Tried to find the nounvalue of a non-noun")
return 1 if word in pos_nouns else -1 if word in neg_nouns else 0
#return s with all whitespace characters removed
def trimWhitespace(s):
trimmed = ""
for c in s:
if c not in ['\t', '\r', '\n', ' ']:
trimmed += c
return trimmed
#return s with all whitespace characters before the first non-whitedspace character removed
def trimLeadingWhitespace(s):
trimIndex = 0
for c in s:
if c in ['\t', '\r', '\n', ' ']:
trimIndex +=1
else:
break
return s[trimIndex:]
#A whitespace-agnositic beginswith method
def beginsWithNoWhitespace(s, pattern):
return beginsWith(trimWhitespace(s), pattern)
def beginsWith(s, pattern):
return s[:len(pattern)] == pattern
def loadFileIntoList(filename, list):
f = open(filename, 'r')
for word in f.readlines():
list.append(word.split(" ")[-1][:-1])
f.close()
#load initial noun and adjective lists
def loadWordLists():
loadFileIntoList("include/neutral_adjective.wordlist" , pos_adj)
loadFileIntoList("include/positive_adjective.wordlist", pos_adj)
loadFileIntoList("include/negative_adjective.wordlist", neg_adj)
loadFileIntoList("include/positive_noun.wordlist", pos_nouns)
loadFileIntoList("include/neutral_noun.wordlist" , pos_nouns)
loadFileIntoList("include/negative_noun.wordlist", neg_nouns)
loadFileIntoList("include/positive_comparative.wordlist", pos_comp)
loadFileIntoList("include/positive_comparative.wordlist", neg_comp)
loadFileIntoList("include/character.wordlist", valid_names)
roman_values = { 'M': 1000, 'D': 500, 'C': 1000, 'L': 50, 'X': 10, 'V': 5, 'I': 1 }
def parseRomanNumeral(roman_string):
roman_string = ro | man_string.upper()
strindex = 0
roman_sum = 0
while strindex < len(roman_string) - 1:
| if(roman_values[roman_string[strindex]] < roman_values[roman_string[strindex+1]]):
roman_sum -= roman_values[roman_string[strindex]]
else:
roman_sum += roman_values[roman_string[strindex]]
strindex += 1
return roman_sum + roman_values[roman_string[strindex]]
def isNumber(s):
words = s.split(" ")
for word in words:
if isNoun(word):
return True
return False
#parse a string that is supposed to evaluate to a number
#if failOk is set to true, will return 0 for phrases that do not evaluate to a number
def parseNum(s, failOk = False):
words = s.split(" ")
nounIndex = len(words)
for i in range(0,len(words)):
if isNoun(words[i]):
nounIndex = i
break
ok = nounIndex < len(words)
if not ok and failOk:
return 0
Assert (ok, str(words) + "\nExpected a number, but found no noun")
value = nounValue(words[nounIndex])
for word in words[:nounIndex]:
if isAdjective(word):
value *= 2
return value
def parseEnterOrExit():
global stage
endBracket = src[N].find(']')
Assert(endBracket >= 0, "[ without matching ]")
enterOrExit = src[N][src[N].find('[')+1:src[N].find(']')]
if beginsWithNoWhitespace(enterOrExit, "Enter"):
names = enterOrExit[enterOrExit.find(" ") + 1:].split(" and ")
for namestr in names:
name = namestr.split(" ")[-1]
Assert(name in vartable, "Undeclared actor entering a scene")
stage.add(name)
Assert(len(stage) < 3, "Too many actors on stage")
elif beginsWithNoWhitespace(enterOrExit, "Exit"):
names = enterOrExit[enterOrExit.find(" ") + 1:].split(" and ")
for namestr in names:
name = namestr.split(" ")[-1]
Assert(name in stage, "Trying to make an actor who is not in the scene exit")
stage.remove(name)
elif beginsWithNoWhitespace(enterOrExit, "Exeunt"):
stage = set([])
else:
Assert(False, "Bracketed clause without Enter, Exit, or Exeunt")
#returns the index of the leftmost punctuation mark in s
def findPunctuation(s):
valids = []
for val in [s.find('.'), s.find('!'), s.find('?')]:
if val >= 0:
valids.append(val)
return -1 if len(valids) == 0 else min(valids)
#returns an array of the punctuation-delimited statements at the current location in the parsing
def getStatements():
global N
statements = []
line = trimLeadingWhitespace(src[N])
unfinished = False
while line.find(':') < 0 and line.find('[') < 0:
punctuation = findPunctuation(line)
if punctuation < 0:
if unfinished == False:
statements.append(line[:-1])
else:
statements[-1] += line[:-1]
N += 1
line = src[N]
unfinished = True
elif punctuation > 0:
if not unfinished:
statements.append("")
statements[-1] += line[:punctuation]
line = line[punctuation + 1:]
unfinished = False
retval = []
for stat in statements:
if len(trimWhitespace(stat)) > 0:
retval.append(stat)
return retval
class Tree:
def __init__(self, v, l, r):
self.value = v
self.left = l
self.right = r
def wordToOperator(op):
if op == "sum":
return "+"
elif op == "difference":
return "-"
elif op == "quotient":
return "/"
elif op == "product":
return "*"
else:
Assert(False, "Illegal Operator")
binop = ["sum", "difference", "quotient", "product"]
unop = ["square", "cube", "twice"]
def buildExpressionTree(expr):
Assert (len(expr) > 0, "Ill-formed Expression in " + str(expr))
if expr[0] == "square":
if expr[1] == "root":
op = "(int)sqrt"
expr = expr[2:]
num, expr = buildExpressionTree(expr)
return Tree(op, num, ""), expr
elif expr[0] == "remainder":
if expr[1] == "of" and expr[2] == "the" and expr[3] == "quotient":
expr = expr[4:]
op = "%"
left, expr = buildExpressionTree(expr)
right, expr = buildExpressionTree(expr)
return Tree(op, left, right), expr
if expr[0] in binop:
op = wordToOperator(expr[0])
expr = expr[1:]
left, expr = buildExpressionTree(expr)
right, expr = buildExpressionTree(expr)
return Tree(op, left, right), expr
elif expr[0] in unop:
op = expr[0]
expr = expr[1:]
num, expr = buildExpressionTree(expr)
return Tree(op, num, ""), expr
|
opencord/xos | lib/xos-api/xosapi/chameleon_client/protos/schema_pb2_grpc.py | Python | apache-2.0 | 2,079 | 0.00481 | #!/usr/bin/env python
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WAR | RANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
from __fu | ture__ import absolute_import
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from . import schema_pb2 as schema__pb2
class SchemaServiceStub(object):
"""Schema services
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetSchema = channel.unary_unary(
'/schema.SchemaService/GetSchema',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=schema__pb2.Schemas.FromString,
)
class SchemaServiceServicer(object):
"""Schema services
"""
def GetSchema(self, request, context):
"""Return active grpc schemas
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_SchemaServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetSchema': grpc.unary_unary_rpc_method_handler(
servicer.GetSchema,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=schema__pb2.Schemas.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'schema.SchemaService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
os-cloud-storage/openstack-workload-disaster-recovery | dragon/db/sqlalchemy/migration.py | Python | apache-2.0 | 3,810 | 0.00105 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import distutils.version as dist_version
import os
import sys
from dragon.db.sqlalchemy.session import get_engine
from dragon.db import migration
import sqlalchemy
import migrate
from migrate.versioning import util as migrate_util
from dragon.openstack.common import exception
from dragon.openstack.common.gettextutils import _
_REPOSITORY = None
@migrate_util.decorator
def patched_with_engine(f, *a, **kw):
url = a[0] |
engine = migrate_util.construct_engine(url, **kw)
try:
kw['engine'] = engine
return f(*a, **kw)
finally:
if isinstance(engine, migrate_util.Engine) and engine is not url:
| migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
engine.dispose()
# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
migrate_util.with_engine = patched_with_engine
# NOTE(jkoelker) Delay importing migrate until we are patched
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
try:
from migrate.versioning import exceptions as versioning_exceptions
except ImportError:
try:
from migrate import exceptions as versioning_exceptions
except ImportError:
sys.exit(_("python-migrate is not installed. Exiting."))
#_REPOSITORY = None
def db_sync(version=None):
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.Error(_("version should be an integer"))
current_version = db_version()
repository = _find_migrate_repo()
if version is None or version > current_version:
return versioning_api.upgrade(get_engine(), repository, version)
else:
return versioning_api.downgrade(get_engine(), repository,
version)
def db_version():
repository = _find_migrate_repo()
try:
return versioning_api.db_version(get_engine(), repository)
except versioning_exceptions.DatabaseNotControlledError as exc:
# If we aren't version controlled there may be an existing,
# non-version controlled database present.
meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables):
raise exc
db_version_control(migration.INIT_VERSION)
return versioning_api.db_version(get_engine(), repository)
def db_version_control(version=None):
repository = _find_migrate_repo()
versioning_api.version_control(get_engine(), repository, version)
return version
def _find_migrate_repo():
"""Get the path for the migrate repository."""
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo')
assert os.path.exists(path)
global _REPOSITORY
if _REPOSITORY is None:
_REPOSITORY = Repository(path)
return _REPOSITORY
|
nyergler/nested-formset | setup.py | Python | bsd-3-clause | 1,386 | 0.001443 | from setuptools import setup, find_packages
import sys, os
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
NEWS = open(os.path.join(here, 'NEWS.txt')).read()
version = '0.1.4'
setup(name='django-nested-formset',
description='Nest Django formsets for multi-level hierarchical editing',
author='Nathan Yergler',
author_email='nathan@yergler.net',
version=version,
long_description=README + '\n\n' + NEWS,
classifiers=[
'Framework :: Django',
'Framework :: Django :: 1.11',
'License :: OSI Approved :: BSD License',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='',
url='https://github.com/nyergle | r/nested-formset',
license='BSD',
packages=find_packages('src'),
package_dir={'': 'src'},
incl | ude_package_data=True,
zip_safe=False,
install_requires=[
'Django<2.0',
],
tests_require=[
'rebar',
],
test_suite='nested_formset.tests.run_tests',
)
|
the-zebulan/CodeWars | katas/beta/multiply_list_by_integer_with_restrictions.py | Python | mit | 86 | 0.011628 | fro | m operator import mul
def multiply(n, l):
return map(lambda a: mul(a, n), | l)
|
cybergarage/round-py | round/test.py | Python | bsd-3-clause | 1,569 | 0.006373 | #################################################################
#
# Round for Python
#
# Copyright (C) Satoshi Konno 2016
#
# This is licensed under BSD-style license, see file COPYING.
#
##################################################################
from __future__ import absolute_import
from .server import Server, DebugServer, ProcessServer, ContainerServer
from .node import Node
class TestProcessServer(ProcessServer):
def __init__(self):
ProcessServer.__init__(self)
def __del__(self):
self.stop()
def start(self, n=1):
return ProcessServer.start(self, n)
def stop(self):
return ProcessServer.stop(self)
class TestDebugServer(DebugServer):
def __init__(self):
DebugServer.__init__(self)
def __del__(self):
self.stop()
def start(self, n=1):
return DebugServer.start(self, n)
def stop(self):
return DebugServer.stop(self)
class TestServer:
@staticmethod
def Create():
| # return TestProcessServer()
return TestDebugServer()
class TestNode(Node):
def __init__(self):
Node.__init__(self)
self.server = TestServer.Create()
self.server.start()
self.start()
def start(self):
if | not self.server.start():
return False
node = self.server.nodes[0]
if not node.is_alive:
return False
self.set_node(node)
return True
def stop(self):
self.server.stop()
return True
def __del__(self):
self.stop() |
UKN-DBVIS/SciBib | app/backend/db_controller/helper.py | Python | apache-2.0 | 2,777 | 0.002521 | # Copyright (C) 2020 University of Konstanz - Data Analysis and Visualization Group
# This file is part of SciBib <https://github.com/dbvis-ukon/SciBib>.
#
# SciBib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SciBib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SciBib. If not, see <http://www.gnu.org/licenses/>.
from backend.db_controller.db import SQLAlchemy
# from flask_sqlalchemy import SQLAlchemy
from backend.db_controller.db import Users_publication
db = SQLAlchemy()
def _is_authorized(pub_id, curr_user):
    """
    @note currently not in use.
    Check if a user is authorized to edit a publication. A user must either have created the publication or be an admin.
    @param pub_id: the database ID of the publication
    @type pub_id: int
    @param curr_user: the user object of the user trying to edit a publication
    @type curr_user: User object
    @return: if the user is authorized
    @rtype: bool
    """
    # check if the current user is the editor of the publication.
    # BUG FIX: the original used the Python `and` operator inside filter(),
    # which evaluates to only the *second* comparison and silently drops the
    # user_id condition. SQLAlchemy combines multiple filter() arguments
    # with SQL AND, so pass both criteria as separate arguments.
    is_editor = db.session.query(
        db.session().query(Users_publication)
        .filter(
            Users_publication.user_id == curr_user.get_id(),
            Users_publication.publication_id == pub_id,
        )
        .exists()
    ).scalar()
    db.session.close()
    return is_editor or curr_user.has_role('admin')
def _createCiteName(authors, year, title):
"""
Create a name for a bibtex citation:
* concat the first two letters of the first three author
* with the publication year
* and the first word of the publication title
@param authors: the authors of a publication
@type authors: list(Author)
@param year: the publication year of a publication
@type year: int
@param title: the title of a publication
@type title: string
@return: the newly created citename for the publication
@rtype: string
"""
citename = ''.join([a['surname'][:2].title() for a in authors[:3]])
if len(authors) > 3:
citename += '+'
citename += year
citename += title.split()[0]
return citename
def isInt(s):
    """
    Check whether a string can be parsed as an integer.
    @param s: string to check
    @type s: string
    @return: True if the string represents an int, False otherwise
    @rtype: bool
    """
    try:
        int(s)
    except ValueError:
        return False
    return True
|
Poofjunior/dxf2gcode | gui/messagebox.py | Python | gpl-3.0 | 2,949 | 0.001018 | # -*- coding: utf-8 -*-
############################################################################
#
# Copyright (C) 2011-2014
# Christian Kohlöffel
#
# This file is part of DXF2GCODE.
#
# DXF2GCODE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DXF2GCODE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DXF2GCODE. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
"""
Special purpose canvas including all required plotting function etc.
"""
from globals.six import text_type
import globals.constants as c
if c.PYQT5notPYQT4:
from PyQt5.QtWidgets import QTextBrowser
from PyQt5 import QtCore
else:
from PyQt4.QtGui import QTextBrowser
from PyQt4 import QtCore
class MessageBox(QTextBrowser):
    """
    The MessageBox Class performs the write functions in the Message Window.
    The previously defined MessageBox class is used as output (within ui).
    @sideeffect: None
    """
    def __init__(self, origobj):
        """
        Initialization of the MessageBox class.
        @param origobj: This is the reference to the parent class initialized
        previously.
        """
        super(MessageBox, self).__init__()
        self.setOpenExternalLinks(True)
        # Greeting / version banner shown when the message window opens.
        self.append(self.tr("You are using DXF2GCODE"))
        self.append(self.tr("Version %s (%s)") % (c.VERSION, c.DATE))
        self.append(self.tr("For more information and updates visit:"))
        self.append("<a href='http://sourceforge.net/projects/dxf2gcode/'>http://sourceforge.net/projects/dxf2gcode/</a>")

    def tr(self, string_to_translate):
        """
        Translate a string using the QCoreApplication translation framework
        @param string_to_translate: a unicode string
        @return: the translated unicode string if it was possible to translate
        """
        return text_type(QtCore.QCoreApplication.translate('MessageBox',
                                                           string_to_translate))

    def write(self, string):
        """
        The function is called by the window logger to write
        the log message to the Messagebox
        @param string: The log message which will be written.
        """
        stripped_string = string.strip()
        if stripped_string:
            self.append(stripped_string)
            # Keep the view scrolled to the newest message.
            self.verticalScrollBar().setValue(1e9)
|
jhamman/xarray | xarray/tests/test_formatting.py | Python | apache-2.0 | 12,536 | 0.000718 | import sys
from textwrap import dedent
imp | ort numpy as np
import pandas as pd
import xarray as xr
from xarray.core import formatting
from . import raises_regex
class TestFormatting:
    def test_get_indexer_at_least_n_items(self):
        """Indexers selecting >= 10 items are built correctly from both ends.

        Each case is (shape, expected_indexer_from_start, expected_indexer_from_end).
        """
        cases = [
            ((20,), (slice(10),), (slice(-10, None),)),
            ((3, 20), (0, slice(10)), (-1, slice(-10, None))),
            ((2, 10), (0, slice(10)), (-1, slice(-10, None))),
            ((2, 5), (slice(2), slice(None)), (slice(-2, None), slice(None))),
            ((1, 2, 5), (0, slice(2), slice(None)), (-1, slice(-2, None), slice(None))),
            ((2, 3, 5), (0, slice(2), slice(None)), (-1, slice(-2, None), slice(None))),
            (
                (1, 10, 1),
                (0, slice(10), slice(None)),
                (-1, slice(-10, None), slice(None)),
            ),
            (
                (2, 5, 1),
                (slice(2), slice(None), slice(None)),
                (slice(-2, None), slice(None), slice(None)),
            ),
            ((2, 5, 3), (0, slice(4), slice(None)), (-1, slice(-4, None), slice(None))),
            (
                (2, 3, 3),
                (slice(2), slice(None), slice(None)),
                (slice(-2, None), slice(None), slice(None)),
            ),
        ]
        # Verify both directions for every shape.
        for shape, start_expected, end_expected in cases:
            actual = formatting._get_indexer_at_least_n_items(shape, 10, from_end=False)
            assert start_expected == actual
            actual = formatting._get_indexer_at_least_n_items(shape, 10, from_end=True)
            assert end_expected == actual
def test_first_n_items(self):
array = np.arange(100).reshape(10, 5, 2)
for n in [3, 10, 13, 100, 200]:
actual = formatting.first_n_items(array, n)
expected = array.flat[:n]
assert (expected == actual).all()
with raises_regex(ValueError, "at least one item"):
formatting.first_n_items(array, 0)
def test_last_n_items(self):
array = np.arange(100).reshape(10, 5, 2)
for n in [3, 10, 13, 100, 200]:
actual = formatting.last_n_items(array, n)
expected = array.flat[-n:]
assert (expected == actual).all()
with raises_regex(ValueError, "at least one item"):
formatting.first_n_items(array, 0)
def test_last_item(self):
array = np.arange(100)
reshape = ((10, 10), (1, 100), (2, 2, 5, 5))
expected = np.array([99])
for r in reshape:
result = formatting.last_item(array.reshape(r))
assert result == expected
    def test_format_item(self):
        """Scalars of various types render to their expected short strings.

        Each case is (value, expected_formatted_string).
        """
        cases = [
            (pd.Timestamp("2000-01-01T12"), "2000-01-01T12:00:00"),
            (pd.Timestamp("2000-01-01"), "2000-01-01"),
            (pd.Timestamp("NaT"), "NaT"),
            (pd.Timedelta("10 days 1 hour"), "10 days 01:00:00"),
            (pd.Timedelta("-3 days"), "-3 days +00:00:00"),
            (pd.Timedelta("3 hours"), "0 days 03:00:00"),
            (pd.Timedelta("NaT"), "NaT"),
            ("foo", "'foo'"),
            (b"foo", "b'foo'"),
            (1, "1"),
            (1.0, "1.0"),
        ]
        for item, expected in cases:
            actual = formatting.format_item(item)
            assert expected == actual
    def test_format_items(self):
        """Sequences (mainly timedeltas) join into the expected display string.

        Each case is (iterable, expected_space_joined_string).
        """
        cases = [
            (np.arange(4) * np.timedelta64(1, "D"), "0 days 1 days 2 days 3 days"),
            (
                np.arange(4) * np.timedelta64(3, "h"),
                "00:00:00 03:00:00 06:00:00 09:00:00",
            ),
            (
                np.arange(4) * np.timedelta64(500, "ms"),
                "00:00:00 00:00:00.500000 00:00:01 00:00:01.500000",
            ),
            (pd.to_timedelta(["NaT", "0s", "1s", "NaT"]), "NaT 00:00:00 00:00:01 NaT"),
            (
                pd.to_timedelta(["1 day 1 hour", "1 day", "0 hours"]),
                "1 days 01:00:00 1 days 00:00:00 0 days 00:00:00",
            ),
            ([1, 2, 3], "1 2 3"),
        ]
        for item, expected in cases:
            actual = " ".join(formatting.format_items(item))
            assert expected == actual
def test_format_array_flat(self):
actual = formatting.format_array_flat(np.arange(100), 2)
expected = "0 ... 99"
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 9)
expected = "0 ... 99"
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 10)
expected = "0 1 ... 99"
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 13)
expected = "0 1 ... 98 99"
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 15)
expected = "0 1 2 ... 98 99"
assert expected == actual
actual = formatting.format_array_flat(np.arange(100.0), 11)
expected = "0.0 ... 99.0"
assert expected == actual
actual = formatting.format_array_flat(np.arange(100.0), 1)
expected = "0.0 ... 99.0"
assert expected == actual
actual = formatting.format_array_flat(np.arange(3), 5)
expected = "0 1 2"
assert expected == actual
actual = formatting.format_array_flat(np.arange(4.0), 11)
expected = "0.0 ... 3.0"
assert expected == actual
actual = formatting.format_array_flat(np.arange(0), 0)
expected = ""
assert expected == actual
actual = formatting.format_array_flat(np.arange(1), 0)
expected = "0"
assert expected == actual
actual = formatting.format_array_flat(np.arange(2), 0)
expected = "0 1"
assert expected == actual
actual = formatting.format_array_flat(np.arange(4), 0)
expected = "0 ... 3"
assert expected == actual
    def test_pretty_print(self):
        # Long input is truncated with '...' to the requested width;
        # a short multi-byte character is returned unchanged.
        assert formatting.pretty_print("abcdefghij", 8) == "abcde..."
        assert formatting.pretty_print("ß", 1) == "ß"
    def test_maybe_truncate(self):
        # A multi-byte character that fits within the limit is left untouched.
        assert formatting.maybe_truncate("ß", 10) == "ß"
def test_format_timestamp_out_of_bounds(self):
from datetime import datetime
date = datetime(1300, 12, 1)
expected = "1300-12-01"
result = formatting.format_timestamp(date)
assert result == expected
date = datetime(2300, 12, 1)
expected = "2300-12-01"
result = formatting.format_timestamp(date)
assert result == expected
    def test_attribute_repr(self):
        """Attribute summaries are indented, truncated, and whitespace-sanitized."""
        short = formatting.summarize_attr("key", "Short string")
        long = formatting.summarize_attr("key", 100 * "Very long string ")
        newlines = formatting.summarize_attr("key", "\n\n\n")
        tabs = formatting.summarize_attr("key", "\t\t\t")
        assert short == "    key: Short string"
        # Long values are capped at one display line and elided.
        assert len(long) <= 80
        assert long.endswith("...")
        # Control whitespace must not leak into the one-line summary.
        assert "\n" not in newlines
        assert "\t" not in tabs
def test_diff_array_repr(self):
da_a = xr.DataArray(
np.array([[1, 2, 3], [4, 5, 6]], dtype="int64"),
dims=("x", "y"),
coords={
"x": np.array(["a", "b"], dtype="U1"),
"y": np.array([1, 2, 3], dtype="int64"),
},
attrs={"units": "m", "description": "desc"},
)
da_b = xr.DataArray(
np.array([1, 2], dtype="int64"),
dims="x",
coords={
"x": np.array(["a", "c"], dtype="U1"),
"label": ("x", np.array([1, 2], dtype="int64")),
},
attrs={"units": "kg"},
)
byteorder = "<" if sys.byteorder == "little" else ">"
expected = dedent(
"""\
Left and right DataArray objects are not identical
Differing dimensions:
(x: 2, y: 3) != (x: 2)
Differing values:
L
array([[1, 2, 3],
[4, 5, 6]], dtype=int64)
R
array([1, 2], dtype=int64)
Differing coordinates:
L * x (x) %cU1 'a' 'b'
R * |
inventree/InvenTree | InvenTree/InvenTree/serializers.py | Python | mit | 18,182 | 0.00132 | """
Serializers used in various InvenTree apps
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import tablib
from decimal import Decimal
from collections import OrderedDict
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError as DjangoValidationError
from django.utils.translation import ugettext_lazy as _
from django.db import models
from djmoney.contrib.django_rest_framework.fields import MoneyField
from djmoney.money import Money
from djmoney.utils import MONEY_CLASSES, get_currency_field_name
from rest_framework import serializers
from rest_framework.utils import model_meta
from rest_framework.fields import empty
from rest_framework.exceptions import ValidationError
from rest_framework.serializers import DecimalField
from .models import extract_int
class InvenTreeMoneySerializer(MoneyField):
    """
    Custom serializer for 'MoneyField',
    which ensures that passed values are numerically valid

    Ref: https://github.com/django-money/django-money/blob/master/djmoney/contrib/django_rest_framework/fields.py
    """

    def __init__(self, *args, **kwargs):
        # Sensible defaults for monetary amounts unless the caller overrides them
        kwargs["max_digits"] = kwargs.get("max_digits", 19)
        kwargs["decimal_places"] = kwargs.get("decimal_places", 4)
        super().__init__(*args, **kwargs)

    def get_value(self, data):
        """
        Test that the returned amount is a valid Decimal
        """

        amount = super(DecimalField, self).get_value(data)

        # Convert an empty string to None
        if len(str(amount).strip()) == 0:
            amount = None

        try:
            if amount is not None and amount is not empty:
                amount = Decimal(amount)
        except Exception:
            # Decimal() raises InvalidOperation / ValueError / TypeError for
            # malformed input; report all of them as a validation error.
            # (Previously a bare 'except:' which also swallowed SystemExit
            # and KeyboardInterrupt.)
            raise ValidationError({
                self.field_name: [_("Must be a valid number")],
            })

        currency = data.get(get_currency_field_name(self.field_name), self.default_currency)

        if currency and amount is not None and not isinstance(amount, MONEY_CLASSES) and amount is not empty:
            return Money(amount, currency)

        return amount
class UserSerializer(serializers.ModelSerializer):
    """ Serializer for User - provides all fields """

    class Meta:
        model = User
        # DRF's ModelSerializer only accepts the literal string '__all__'
        # (or a list/tuple of field names) here; the bare string 'all'
        # raises a TypeError when the serializer is instantiated.
        fields = '__all__'
class UserSerializerBrief(serializers.ModelSerializer):
    """ Serializer for User - provides limited information """
    class Meta:
        model = User
        # Expose only the primary key and username; avoids leaking
        # personal details (email, names) where they are not needed.
        fields = [
            'pk',
            'username',
        ]
class InvenTreeModelSerializer(serializers.ModelSerializer):
    """
    Inherits the standard Django ModelSerializer class,
    but also ensures that the underlying model class data are checked on validation.
    """

    @staticmethod
    def _resolve_default(field):
        """
        Resolve the default value declared on a Django model field.

        Returns a (found, value) tuple:
        - (True, value) if the default is usable (callables are invoked)
        - (False, None) if invoking a callable default raised an error
        """
        value = field.default

        # Account for callable functions
        if callable(value):
            try:
                value = value()
            except Exception:
                # Previously a bare 'except:'; a failing callable default
                # simply means the field is skipped.
                return False, None

        return True, value

    def __init__(self, instance=None, data=empty, **kwargs):
        """
        Custom __init__ routine to ensure that *default* values (as specified in the ORM)
        are used by the DRF serializers, *if* the values are not provided by the user.
        """

        # If instance is None, we are creating a new instance
        if instance is None and data is not empty:

            if data is None:
                data = OrderedDict()
            else:
                # Copy so we never mutate the caller's data structure
                new_data = OrderedDict()
                new_data.update(data)
                data = new_data

            # Add missing fields which have default values
            ModelClass = self.Meta.model
            fields = model_meta.get_field_info(ModelClass)

            for field_name, field in fields.fields.items():
                # Update the field IF (and ONLY IF):
                # - The field has a specified default value
                # - The field does not already have a value set
                if field.has_default() and field_name not in data:
                    found, value = self._resolve_default(field)
                    if found:
                        data[field_name] = value

        super().__init__(instance, data, **kwargs)

    def get_initial(self):
        """
        Construct initial data for the serializer.
        Use the 'default' values specified by the django model definition
        """

        initials = super().get_initial().copy()

        # Are we creating a new instance?
        if self.instance is None:
            ModelClass = self.Meta.model
            fields = model_meta.get_field_info(ModelClass)

            for field_name, field in fields.fields.items():
                if field.has_default() and field_name not in initials:
                    found, value = self._resolve_default(field)
                    if found:
                        initials[field_name] = value

        return initials

    def save(self, **kwargs):
        """
        Catch any django ValidationError thrown at the moment save() is called,
        and re-throw as a DRF ValidationError
        """
        try:
            super().save(**kwargs)
        except (ValidationError, DjangoValidationError) as exc:
            raise ValidationError(detail=serializers.as_serializer_error(exc))

        return self.instance

    def update(self, instance, validated_data):
        """
        Catch any django ValidationError, and re-throw as a DRF ValidationError
        """
        try:
            instance = super().update(instance, validated_data)
        except (ValidationError, DjangoValidationError) as exc:
            raise ValidationError(detail=serializers.as_serializer_error(exc))

        return instance

    def run_validation(self, data=empty):
        """
        Perform serializer validation.
        In addition to running validators on the serializer fields,
        this class ensures that the underlying model is also validated.
        """

        # Run any native validation checks first (may raise a ValidationError)
        data = super().run_validation(data)

        # Now ensure the underlying model is correct
        if not hasattr(self, 'instance') or self.instance is None:
            # No instance exists (we are creating a new one)
            instance = self.Meta.model(**data)
        else:
            # Instance already exists (we are updating!)
            instance = self.instance

            # Update instance fields
            for attr, value in data.items():
                try:
                    setattr(instance, attr, value)
                except (ValidationError, DjangoValidationError) as exc:
                    raise ValidationError(detail=serializers.as_serializer_error(exc))

        # Run a 'full_clean' on the model.
        # Note that by default, DRF does *not* perform full model validation!
        try:
            instance.full_clean()
        except (ValidationError, DjangoValidationError) as exc:

            data = exc.message_dict

            # Change '__all__' key (django style) to 'non_field_errors' (DRF style)
            if '__all__' in data:
                data['non_field_errors'] = data['__all__']
                del data['__all__']

            raise ValidationError(data)

        return data
class ReferenceIndexingSerializerMixin():
    """
    Serializer mixin which ensures the reference is not too big
    for the database BigIntegerField
    """
    def validate_reference(self, value):
        """Reject references whose integer portion overflows a BigIntegerField."""
        # Removed a stray '|' artifact that split 'BigIntegerField';
        # also fixed the ungrammatical error message ('is to to big').
        if extract_int(value) > models.BigIntegerField.MAX_BIGINT:
            raise serializers.ValidationError('reference is too big')
        return value
class InvenTreeAttachmentSerializerField(serializers.FileField):
"""
Override the DRF native FileField serializer,
to remove the leading server path.
For example, the FileField might supply something like:
http://127.0.0.1:8000/media/foo/bar.jpg
Whereas we wish to return:
/media/foo/bar. |
samastur/django-filer | filer/fields/file.py | Python | bsd-3-clause | 5,482 | 0.002189 | #-*- coding: utf-8 -*-
import inspect
from django import forms
from django.conf import settings as globalsettings
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.contrib.admin.sites import site
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import models
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.text import truncate_words
from filer.models import File
from filer import settings as filer_settings
import logging
logger = logging.getLogger(__name__)
class AdminFileWidget(ForeignKeyRawIdWidget):
choices = None
def render(self, name, value, attrs=None):
obj = self.obj_for_value(value)
css_id = attrs.get('id', 'id_image_x')
css_id_thumbnail_img = "%s_thumbnail_img" % css_id
css_id_description_txt = "%s_description_txt" % css_id
related_url = None
if value:
try:
file_obj = File.objects.get(pk=value)
related_url = file_obj.logical_folder.\
get_admin_directory_listing_url_path()
except Exception,e:
# catch exception and manage it. We can re-raise it for debugging
# purposes and/or just logging it, provided user configured
# proper logging configuration
if filer_settings.FILER_ENABLE_LOGGING:
logger.error('Error while rendering file widget: %s',e)
if filer_settings.FILER_DEBUG:
raise e
if not related_url:
related_url = reverse('admin:filer-directory_listing-last')
params = self.url_parameters()
if params:
lookup_url = '?' + '&'.join(
['%s=%s' % (k, v) for k, v in params.items()])
else:
lookup_url = ''
if not 'class' in attrs:
# The JavaScript looks for this hook.
attrs['class'] = 'vForeignKeyRawIdAdminField'
# rendering the super for ForeignKeyRawIdWidget on purpose here because
# we only need the input and none of the other stuff that
# ForeignKeyRawIdWidget adds
hidden_input = super(ForeignKeyRawIdWidget, self).render(
name, value, attrs)
filer_static_prefix = filer_settings.FILER_STATICMEDIA_PREFIX
if not filer_static_prefix[-1] == '/':
filer_static_prefix += '/'
context = {
'hidden_input': hidden_input,
'lookup_url': '%s%s' % (related_url, lookup_url),
'thumb_id': css_id_thumbnail_img,
'span_id': css_id_description_txt,
'object': obj,
'lookup_name': name,
'filer_static_prefix': filer_static_prefix,
'clear_id': '%s_clear' % css_id,
'id': css_id,
}
html = render_to_string('admin/filer/widgets/admin_file.html', context)
return mark_safe(html)
def label_for_value(self, value):
obj = self.obj_for_value(value)
return ' <strong>%s</strong>' % truncate_words(obj, 14)
def obj_for_value(self, value):
try:
key = self.rel.get_related_field().name
obj = self.rel.to._default_manager.get(**{key: value})
except:
obj = None
return obj
class Media:
js = (filer_settings.FILER_STATICMEDIA_PREFIX + 'js/popup_handling.js',)
class AdminFileFormField(forms.ModelChoiceField):
    """Form field pairing a filer file choice with the AdminFileWidget."""
    widget = AdminFileWidget

    def __init__(self, rel, queryset, to_field_name, *args, **kwargs):
        self.rel = rel
        self.queryset = queryset
        self.to_field_name = to_field_name
        self.max_value = None
        self.min_value = None
        # Discard any caller-supplied widget; we always build our own below.
        # (Removed the unused 'other_widget' local; the pop() side effect
        # on kwargs must be kept.)
        kwargs.pop('widget', None)
        # Removed a stray '|' artifact that split this assignment.
        if 'admin_site' in inspect.getargspec(self.widget.__init__)[0]:  # Django 1.4
            widget_instance = self.widget(rel, site)
        else:  # Django <= 1.3
            widget_instance = self.widget(rel)
        forms.Field.__init__(self, widget=widget_instance, *args, **kwargs)

    def widget_attrs(self, widget):
        """Mirror the field's required flag onto the widget; no extra attrs."""
        widget.required = self.required
        return {}
class FilerFileField(models.ForeignKey):
    """ForeignKey that is always pinned to the filer File model."""
    default_form_class = AdminFileFormField
    default_model_class = File

    def __init__(self, **kwargs):
        # we call ForeignKey.__init__ with the File model as parameter...
        # a FilerFileField can only be a ForeignKey to a File
        # (Removed a stray '|' artifact that split the signature.)
        return super(FilerFileField, self).__init__(
            self.default_model_class, **kwargs)

    def formfield(self, **kwargs):
        # This is a fairly standard way to set up some defaults
        # while letting the caller override them.
        defaults = {
            'form_class': self.default_form_class,
            'rel': self.rel,
        }
        defaults.update(kwargs)
        return super(FilerFileField, self).formfield(**defaults)

    def south_field_triple(self):
        "Returns a suitable description of this field for South."
        # We'll just introspect ourselves, since we inherit.
        from south.modelsinspector import introspector
        field_class = "django.db.models.fields.related.ForeignKey"
        args, kwargs = introspector(self)
        # That's our definition!
        return (field_class, args, kwargs)
|
maplesond/msa2qubo | gurobi.py | Python | gpl-3.0 | 1,975 | 0.028354 | #!/usr/bin/env python3
import bvc
from gurobipy import *
import numpy as np
__author__ = "Dan Mapleson, Luis Yanes, Katie Barr, Sophie Kirkwood and Tim Stitt"
__copyright__ = "Copyright 2016, Quantum MSA"
__credits__ = ["Dan Mapleson", "Luis Yanes", "Katie Barr",
"Sophie Kirkwood", "Tim Stitt"]
__license__ = "GPLv3"
__version__ = "0.0.1"
__maintainer__ = "Dan Mapleson,"
__email__ = "daniel.mapleson@earlham.ac.uk"
__status__ = "Prototype"
def optimise(data):
    """
    Build and solve the integer-programming formulation of the problem
    with Gurobi, then print the variable assignments and objective value.

    Fixes: removed stray '|' dataset-separator artifacts that split
    'GRB.INTEGER' and 'for i in range(...)'.

    @param data: bvc problem container supplying coefficient accessors
                 (qim, lil, ienergy, l0) and bounds (M(), P(), ...)
    """
    # Create a new model
    m = Model("qp")

    # Create variables
    x_k = 0
    G_k = data.get_gVarOffset(intmode=True)

    vars = [None] * data.get_NbIV()

    # Positioning variables x_{k,j}: one per character j of each sequence k.
    for k in range(data.N()):
        L_k = data.lenK(k)
        for j in range(L_k):
            x_kj = x_k + j
            vars[x_kj] = m.addVar(name="x_" + str(k) + "," + str(j), vtype=GRB.INTEGER)
        x_k += L_k

    # Gap variables G_{k,j}.
    for k in range(data.N()):
        L_k = data.lenK(k)
        for j in range(L_k):
            G_kj = G_k + j
            vars[G_kj] = m.addVar(name="G_" + str(k) + "," + str(j), vtype=GRB.INTEGER)
        G_k += L_k

    # Integrate new variables
    m.update()

    data.createBVMatrix(intmode=True)
    data.printIntegerCoefficients()

    # Objective: constant energy term plus quadratic and linear coefficients.
    obj = data.ienergy
    for i in range(data.get_NbIV()):
        for j in range(data.get_NbIV()):
            if data.qim(i, j) != 0:
                obj += data.qim(i, j) * vars[i] * vars[j]
        if data.lil(i) != 0:
            obj += data.lil(i) * vars[i]

    obj = data.l0() * (obj)

    print("Integer Objective Function:")
    print(obj)
    print()

    m.setObjective(obj)

    # Bound positioning variables to [0, M].
    for i in range(data.get_NbPositioningVars(intmode=True)):
        m.addConstr(vars[i] >= 0, "cx" + str(i))
        m.addConstr(vars[i] <= data.M(), "cx" + str(i))

    # Bound gap variables to [0, P].
    for i in range(data.get_gVarOffset(intmode=True), data.get_NbIV()):
        m.addConstr(vars[i] >= 0, "cG" + str(i - data.get_gVarOffset(intmode=True)))
        m.addConstr(vars[i] <= data.P(), "cG" + str(i - data.get_gVarOffset(intmode=True)))

    m.optimize()

    for v in m.getVars():
        print('%s: %g' % (v.varName, v.x))

    print('Obj: %g' % obj.getValue())
|
tiagochiavericosta/edx-platform | lms/djangoapps/open_ended_grading/views.py | Python | agpl-3.0 | 15,748 | 0.002794 | import logging
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from courseware.courses import get_course_with_access
from courseware.access import has_access
from courseware.tabs import EnrolledTab
from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError
import json
from student.models import unique_id_for_user
from open_ended_grading import open_ended_notifications
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import search
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.exceptions import NoPathToItem
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.utils.translation import ugettext as _
from open_ended_grading.utils import (
STAFF_ERROR_MESSAGE, StudentProblemList, generate_problem_url, create_controller_query_service
)
from xblock_django.models import XBlockDisableConfig
log = logging.getLogger(__name__)
def _reverse_with_slash(url_name, course_key):
    """
    Reverse the URL for the given name and course key, guaranteeing a
    trailing slash on the result.

    @param url_name: The name of the url (eg 'staff_grading').
    @param course_key: The key of the course object.
    @returns: The reversed url with a trailing slash.
    """
    ajax_url = _reverse_without_slash(url_name, course_key)
    return ajax_url if ajax_url.endswith('/') else ajax_url + '/'
def _reverse_without_slash(url_name, course_key):
    """Reverse the named URL for the given course key (no slash appended)."""
    return reverse(url_name, kwargs={'course_id': course_key.to_deprecated_string()})
# Descriptive blurb for each open-ended grading view, keyed by display name.
DESCRIPTION_DICT = {
    'Peer Grading': _("View all problems that require peer assessment in this particular course."),
    'Staff Grading': _("View ungraded submissions submitted by students for the open ended problems in the course."),
    'Problems you have submitted': _("View open ended problems that you have previously submitted for grading."),
    'Flagged Submissions': _("View submissions that have been flagged by students as inappropriate."),
}
# Short alert banner text for each view, keyed by the same display names.
ALERT_DICT = {
    'Peer Grading': _("New submissions to grade"),
    'Staff Grading': _("New submissions to grade"),
    'Problems you have submitted': _("New grades have been returned"),
    'Flagged Submissions': _("Submissions have been flagged for review"),
}
class StaffGradingTab(EnrolledTab):
    """
    Course tab linking staff to the open-ended staff grading view.
    """
    type = 'staff_grading'
    title = _("Staff grading")
    view_name = "staff_grading"

    @classmethod
    def is_enabled(cls, course, user=None):  # pylint: disable=unused-argument
        """Shown only to staff, and only when the course uses open-ended blocks."""
        if XBlockDisableConfig.is_block_type_disabled('combinedopenended'):
            return False
        staff_ok = (not user) or has_access(user, 'staff', course, course.id)
        return staff_ok and "combinedopenended" in course.advanced_modules
class PeerGradingTab(EnrolledTab):
    """
    Course tab linking students to the peer grading interface.
    """
    type = 'peer_grading'
    # Translators: "Peer grading" appears on a tab that allows
    # students to view open-ended problems that require grading
    title = _("Peer grading")
    view_name = "peer_grading"

    @classmethod
    def is_enabled(cls, course, user=None):  # pylint: disable=unused-argument
        """Shown to enrolled users when the course uses open-ended blocks."""
        if XBlockDisableConfig.is_block_type_disabled('combinedopenended'):
            return False
        enrolled = super(PeerGradingTab, cls).is_enabled(course, user=user)
        return enrolled and "combinedopenended" in course.advanced_modules
class OpenEndedGradingTab(EnrolledTab):
    """
    Course tab that opens the Open Ended Panel.
    """
    type = 'open_ended'
    # Translators: "Open Ended Panel" appears on a tab that, when clicked, opens up a panel that
    # displays information about open-ended problems that a user has submitted or needs to grade
    title = _("Open Ended Panel")
    view_name = "open_ended_notifications"

    @classmethod
    def is_enabled(cls, course, user=None):  # pylint: disable=unused-argument
        """Shown to enrolled users when the course uses open-ended blocks."""
        if XBlockDisableConfig.is_block_type_disabled('combinedopenended'):
            return False
        enrolled = super(OpenEndedGradingTab, cls).is_enabled(course, user=user)
        return enrolled and "combinedopenended" in course.advanced_modules
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def staff_grading(request, course_id):
    """
    Render the instructor open-ended grading interface for the course.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    # Raises if the requesting user does not have staff access.
    course = get_course_with_access(request.user, 'staff', course_key)

    context = {
        'course': course,
        'course_id': course_id,
        'ajax_url': _reverse_with_slash('staff_grading', course_key),
        'staff_access': True,  # enforced by get_course_with_access above
    }
    return render_to_response('instructor/staff_grading.html', context)
def find_peer_grading_module(course):
    """
    Given a course, finds a centralized peer grading module in it.

    Fixes: removed stray '|' dataset-separator artifacts that trailed the
    docstring and split the reverse() call.

    @param course: A course object.
    @return: boolean found_module, string problem_url
    """
    # Reverse the base course url.
    base_course_url = reverse('courses')
    found_module = False
    problem_url = ""

    # Get the peer grading modules currently in the course. Explicitly specify the course id to avoid issues with different runs.
    items = modulestore().get_items(course.id, qualifiers={'category': 'peergrading'})
    # See if any of the modules are centralized modules (ie display info from multiple problems)
    items = [i for i in items if not getattr(i, "use_for_single_location", True)]

    # Loop through all potential peer grading modules; the last one that has
    # a valid path to it wins (behavior preserved from the original).
    for item in items:
        # Generate a url for the module so the user can be redirected to it.
        try:
            problem_url_parts = search.path_to_location(modulestore(), item.location)
        except NoPathToItem:
            # In the case of nopathtoitem, the peer grading module that was found is in an invalid state, and
            # can no longer be accessed. Log an informational message, but this will not impact normal behavior.
            log.info(u"Invalid peer grading module location %s in course %s. This module may need to be removed.", item.location, course.id)
            continue
        problem_url = generate_problem_url(problem_url_parts, base_course_url)
        found_module = True

    return found_module, problem_url
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def peer_grading(request, course_id):
    """
    Redirect a student who clicked the "peer grading" button in the open
    ended interface to a peer grading xmodule in the course.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    # Load the current course (requires 'load' access).
    course = get_course_with_access(request.user, 'load', course_key)

    found_module, problem_url = find_peer_grading_module(course)
    if found_module:
        return HttpResponseRedirect(problem_url)

    error_message = _("""
        Error with initializing peer grading.
        There has not been a peer grading module created in the courseware that would allow you to grade others.
        Please check back later for this.
        """)
    log.exception(error_message + u"Current course is: {0}".format(course_id))
    return HttpResponse(error_message)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def student_problem_list(request, course_id):
"""
Show a list of problems they have attempted to a student.
Fetch the list from the grading controller server and append some data.
@param request: The request object for this view.
@param course_id: The id of the course to get the problem list for.
@return: Renders an HTML problem list table.
"""
assert isinstance(course_id, basestring)
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
# Load the course. Don't catch any errors here, as we want them to be loud.
|
lianliuwei/gyp | pylib/gyp/generator/xcode.py | Python | bsd-3-clause | 53,816 | 0.006875 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import errno
import os
import sys
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that sh | are the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERM | EDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'mac_bundle',
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
]
# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
'$(SDKROOT)/usr/lib',
'$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
if len(configuration_names) == 0:
configuration_names = ['Default']
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
# Collect a list of all of the build configuration names used by the
# various targets in the file. It is very heavily advised to keep each
# target in an entire project (even across multiple project files) using
# the same set of configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings and config files. This
# is intended to be used very sparingly. Really, almost everything should
# go into target-specific build settings sections. The project-wide
# settings are only intended to be used in cases where Xcode attempts to
# resolve variable references in a project context as opposed to a target
# context, such as when resolving sourceTree references while building up
# the tree tree view for UI display.
# Any values set globally are applied to all configurations, then any
# per-configuration values are applied.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in self.build_file_dict:
config_ref = self.project.AddOrGetFileInRootGroup(
self.build_file_dict['xcode_config_file'])
xccl.SetBaseConfiguration(config_ref)
build_file_configurations = self.build_file_dict.get('configurations', {})
if build_file_configurations:
for config_name in configurations:
build_file_configuration_named = \
build_file_configurations.get(config_name, {})
if build_file_configuration_named:
xcc = xccl.ConfigurationNamed(config_name)
for xck, xcv in build_file_configuration_named.get('xcode_settings',
|
justyns/home-assistant | homeassistant/components/sensor/time_date.py | Python | mit | 2,911 | 0 | """
Support for showing the date and the time.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.time_date/
| """
import logging
import homeassistant.util.dt as dt_util
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
OPTION_TYPES = {
'time': 'Time',
'date': 'Date',
'date_time': 'Date & Time',
'time_date': 'Time & Date',
'beat': 'Time (beat)',
't | ime_utc': 'Time (UTC)',
}
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Time and Date sensor."""
if hass.config.time_zone is None:
_LOGGER.error("Timezone is not set in Home Assistant config")
return False
dev = []
for variable in config['display_options']:
if variable not in OPTION_TYPES:
_LOGGER.error('Option type: "%s" does not exist', variable)
else:
dev.append(TimeDateSensor(variable))
add_devices(dev)
# pylint: disable=too-few-public-methods
class TimeDateSensor(Entity):
"""Implementation of a Time and Date sensor."""
def __init__(self, option_type):
"""Initialize the sensor."""
self._name = OPTION_TYPES[option_type]
self.type = option_type
self._state = None
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if "date" in self.type and "time" in self.type:
return "mdi:calendar-clock"
elif "date" in self.type:
return "mdi:calendar"
else:
return "mdi:clock"
def update(self):
"""Get the latest data and updates the states."""
time_date = dt_util.utcnow()
time = dt_util.datetime_to_time_str(dt_util.as_local(time_date))
time_utc = dt_util.datetime_to_time_str(time_date)
date = dt_util.datetime_to_date_str(dt_util.as_local(time_date))
# Calculate the beat (Swatch Internet Time) time without date.
hours, minutes, seconds = time_date.strftime('%H:%M:%S').split(':')
beat = ((int(seconds) + (int(minutes) * 60) + ((int(hours) + 1) *
3600)) / 86.4)
if self.type == 'time':
self._state = time
elif self.type == 'date':
self._state = date
elif self.type == 'date_time':
self._state = date + ', ' + time
elif self.type == 'time_date':
self._state = time + ', ' + date
elif self.type == 'time_utc':
self._state = time_utc
elif self.type == 'beat':
self._state = '{0:.2f}'.format(beat)
|
nputikhin/simple-ant-hrm | SimpleHRMServer/sensorserver.py | Python | mit | 2,312 | 0.003893 | import socket
import time
class SensorTCPServer:
'''
Server for Galileo with connected ANT HRM
'''
def startServer(self, serverAddr, handler):
'''
Open a new socket and bind it to serverAddr
'''
self.handler = handler
self.socket = socket.socket()
self.socket.bind(serverAddr)
self.socket.listen(1)
self._shutdown_request = False
def waitForConnection(self):
'''
Wait until client connects and then create files for sending and receiving data
'''
if self.socket is not None:
self.connection, self.clientAddr = self.socket.accept()
self.rfile = self.connection.makefile("rb", 0)
self.hasConnection = True
def loop(self):
'''
Server loop which reads data and passes it for further processing
'''
while not self._shutdown_request:
try:
data = self.rfile.readline().strip().decode()
if (len(data) != 0):
self._process(data)
time.sleep(0.01)
except socket.error as e:
print("socket error:", e)
self.shutdown()
| break
self._onLoopShutdown()
def shutdown(self):
'''
Set shutdown request for loop to stop execution on next iteration
'''
self._shutdown_requ | est = True
def _onLoopShutdown(self):
'''
Shutdown connection and close socket
'''
if self.hasConnection:
self.connection.shutdown(socket.SHUT_RDWR)
self.connection.close()
self.hasConnection = False
self.rfile = None
self.socket.close()
self.socket = None
def _process(self, data):
'''
Determine message type and pass it to handler for processing
Message format:
'type:message'
'''
split = data.split(":")
msgType = split[0]
msg = split[1]
print("incoming message; type: {0} msg: {1}".format(msgType, msg))
if msgType == "measure":
self.handler.processMeasure(msg)
elif msgType == "problem":
self.handler.processProblem(msg) |
anirudhr/neural | adaline.py | Python | gpl-2.0 | 4,068 | 0.014749 | #!/usr/bin/python2
import math, sys, time
def drange(start, stop, step): #Generator for step <1, from http://stackoverflow.com/questions/477486/python-decimal-range-step-value
r = start
while r < stop:
yield r
r += step
class adaline:
def __init__(self, w_vec):#, bias): #absorbed
self.w_vec = w_vec
#self.bias = bias #absorbed
def transfer(self, yin, isTraining = False):
if isTraining: #training, f(yin) = yin
return yin
else: #not training, f(yin) = bipolar Heaviside step function
if yin >= 0:
return 1
else:
return -1
def calc_yin(self, x_vec): #Calculates yin = x.w + b
if len(x_vec) != len(self.w_vec):
raise Exception('Supplied input length does not match weight length.')
yin = 0
#yin = self.bias #absorbed
for xx,ww in zip(x_vec, self.w_vec):
yin += xx*ww
return yin
def train(self, s_vec_list, t_vec, rate):
if rate <= 0:
raise Exception('Rate not positive: ' + str(rate))
if len(s_vec_list) != len(t_vec):
raise Exception('Training set problem: input count does not match result count.')
insigFlag = False
loopCount = 0
while insigFlag == False and loopCount < numEpochs: #Loop till changes in the weights and bias are insignificant.
for s_vec, tt in zip(s_vec_list, t_vec):
yin = self.calc_yin(s_vec)
yy = self.transfer(yin, | isTraining = True) # yy = yin
w_change = list()
bias_change = -2*rate*(yin - tt)
for i in range(len(self.w_vec)):
w_change.append(bias_change*s_vec[i])
if verbose_flag:
print "yy: ", yy
#print "bias_change: ", bias_change #absorbed
pr | int "w_change: ", w_change
#self.bias = self.bias + bias_change #absorbed
for ii,wc in enumerate(self.w_vec):
self.w_vec[ii] = wc + w_change[ii]
#if math.fabs(bias_change) < 0.1: #absorbed
insigFlag = True #time to check if we need to exit
for wc in w_change:
if math.fabs(wc) < 0.1:
insigFlag = True
else:
insigFlag = False
break
#time.sleep(1)
loopCount += 1
###
verbose_flag = False
if len(sys.argv) > 2:
raise Exception('Too many arguments. Usage: adaline.py [-v|--verbose]')
elif len(sys.argv) == 1:
pass
elif sys.argv[1] == '-v' or sys.argv[1] == '--verbose':
verbose_flag = True
else:
raise Exception('Bad argument. Usage: adaline.py [-v|--verbose]')
numEpochs = 100
#ACTUAL
test_s_vec_list = [[1, 1, 1, 1], [-1, 1, -1, -1], [1, 1, 1, -1], [1, -1, -1, 1]]
test_t_vec = [1, 1, -1, -1]
#AND for 2
#test_s_vec_list = [[1, 1], [1, -1], [-1, 1], [-1, -1]]
#test_t_vec = [1, -1, -1, -1]
#AND for 4
#test_s_vec_list = [[1, 1, 1, 1], [1, -1, 1, -1], [-1, 1, -1, 1], [-1, -1, -1, -1]]
#test_t_vec = [1, -1, -1, -1]
for test_s_vec in test_s_vec_list:
test_s_vec.insert(0,1) #absorbing the bias by placing an input shorted to 1 at the head of each training vector
for alpha in [0.1,0.5]:#drange(0.01,1,0.01):
p = adaline([0 for x in test_s_vec_list[0]])#, 0) #absorbed
#alpha = 0.1 #ACTUAL: 0.5
p.train(test_s_vec_list, test_t_vec, rate=alpha)
if verbose_flag:
print "bias+weights: ", p.w_vec
sol_vec = list()
for test_s_vec in test_s_vec_list:
sol_vec.append(p.transfer(p.calc_yin(test_s_vec), isTraining = False))
if verbose_flag:
print 'Solution: ', sol_vec, '\nExpected (t_vec): ', test_t_vec
match_flag = True
for i,j in zip(sol_vec, test_t_vec):
if i != j:
match_flag = False
break
if match_flag:
print 't_vec matched with rate', alpha
|
darknightghost/AntiPkgLoss | ui/screen.py | Python | gpl-3.0 | 2,542 | 0.04341 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2016,暗夜幽灵 <darknightghost.cn@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import curses
import traceback
import locale
from data import *
from menu import *
class screen:
'''
This class is used to show a menu in console
Example:
from ui.ui import | *
scr = screen.screen()
scr.screen_main(menu)
The menu is a list in the format of
[[type,text,value],
[type,text,value],
[type,text,value],
...
[type,text,value]]
Current support types:
Type Value Description
"lable" None Static text
"submenu" menu Sub Menu Entery
"checkbox" True or False CheckBox
"textbox" string TextBox
"listcontrol" [[text1,tex | t2,text3...],selected-index] Show a list and select one
'''
def __init__(self):
locale.setlocale(locale.LC_ALL, '')
self.stdscr = None
self.width = 0
self.height = 0
def screen_main(self,menu_list,title):
success = True
try:
#Begin GUI
self.stdscr = curses.initscr()
self.height = self.stdscr.getmaxyx()[0]
self.width = self.stdscr.getmaxyx()[1]
curses.noecho()
curses.cbreak()
self.stdscr.keypad(1)
color = color_t()
color.init_color()
self.stdscr.nodelay(0)
#Draw background
self.stdscr.bkgd(' ',color.get_color(0,color_t.BLUE))
curses.curs_set(0)
e = encoder()
self.stdscr.addstr(0,0,e.convert(title),
color.get_color(color_t.WHITE,color_t.BLUE) | curses.A_BOLD)
self.update()
#Create menu window
m = menu(self,title)
m.show_menu(menu_list)
except:
success = False
finally:
#End GUI
self.stdscr.keypad(0)
curses.echo()
curses.nocbreak()
curses.endwin()
if not success:
traceback.print_exc()
def update(self):
self.stdscr.refresh()
def get_size(self):
'''
Return the size of screen in the form of (width,height).
'''
return rect_t(self.width,self.height)
|
jacksonicson/paper.IS2015 | times/Times/src/service/times_service.py | Python | mit | 4,006 | 0.008737 | from thrift.protocol import TBinaryProtocol, TCompactProtocol
from thrift.server import TServer
from thrift.transport import TSocket, TTransport
from times import TimeService, ttypes
import os
import re
import StringIO
################################
## Configuration ##
DATA_DIR = os.path.abspath('../../../tdb/')
PORT = 7855
################################
print "Using data dir %s" % DATA_DIR
class Wrapper(ttypes.TimeSeries):
def __init__(self, data):
self.data = data
def read(self, iprot):
print 'WARN: Not implemented'
def write(self, oprot):
oprot.trans.write(self.data)
class TimeSeries(object):
def __write(self, ts, outfile):
outfile = os.path.join(DATA_DIR, outfile)
f = open(outfile, 'wb')
t = TTransport.TFileObjectTransport(f)
prot = TBinaryProtocol.TBinaryProtocolAccelerated(t)
ts.write(prot)
f.close()
def __read(self, infile):
infile = os.path.join(DATA_DIR, infile)
if not os.path.exists(infile):
return None
f = open(infile, 'rb')
io = StringIO.StringIO()
while True:
chunk = f.read(32)
if chunk:
io.write(chunk)
else:
break
f.close()
value = io.getvalue()
io.close()
return Wrapper(value)
def _read_decode(self, infile):
infile = os.path.join(DATA_DIR, infile)
if not os.path.exists(infile):
return None
f = open(infile, 'rb')
t = TTransport.TFileObjectTransport(f)
prot = TBinaryProtocol.TBinaryProtocolAccelerated(t)
ts = ttypes.TimeSeries()
ts.read(prot)
f.close()
return ts
def __filename(self, name):
return name + '.times'
def _delete(self, name):
del_file = os.path.join(DATA_DIR, self.__filename(name))
if os.path.exists(del_file) and os.path.isfile(del_file):
os.remove(del_file)
def _find(self, pattern):
result = []
pattern = re.compile(pattern)
for element in os.listdir(DATA_DIR):
element = element.replace('.times', '')
if pattern.match(element):
result.append(element)
return res | ult
def _create(self, name, frequency):
ts = ttypes.TimeSeries()
| ts.name = name
ts.frequency = frequency
ts.elements = []
self.__write(ts, self.__filename(name))
def _append(self, name, elements):
ts = self._read_decode(self.__filename(name))
if ts is None:
print 'ERROR: TS not found %s' % (name)
return
if ts.elements is None:
ts.elements = []
ts.elements.extend(elements)
self.__write(ts, self.__filename(name))
def _loadFile(self, name):
ts = self.__read(self.__filename(name))
return ts
class TimesHandler(TimeSeries):
def load(self, name):
ts = super(TimesHandler, self)._loadFile(name)
return ts
def create(self, name, frequency):
super(TimesHandler, self)._create(name, frequency)
def append(self, name, elements):
super(TimesHandler, self)._append(name, elements)
def find(self, pattern):
return super(TimesHandler, self)._find(pattern)
def remove(self, name):
return super(TimesHandler, self)._delete(name)
def main():
handler = TimesHandler()
processor = TimeService.Processor(handler)
transport = TSocket.TServerSocket(port=PORT)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
print 'Times listening...'
server.serve()
|
pitunti/alfaPitunti | plugin.video.alfa/servers/kbagi.py | Python | gpl-3.0 | 1,896 | 0.00211 | # -*- coding: utf-8 -*-
from core import httptools
from core import jsontools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
if "kbagi.com" in page_url:
from channels import kbagi
logueado, error_message = kbagi.login("kbagi.com")
if not logueado:
return False, error_message
data = httptools.downloadpage(page_url).data
if ("File was deleted" or "Not Found" or "File was locked by administrator") in data:
return False, "[kbagi] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, pre | mium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
| video_urls = []
data = httptools.downloadpage(page_url).data
host = "http://kbagi.com"
host_string = "kbagi"
if "diskokosmiko.mx" in page_url:
host = "http://diskokosmiko.mx"
host_string = "diskokosmiko"
url = scrapertools.find_single_match(data, '<form action="([^"]+)" class="download_form"')
if url:
url = host + url
fileid = url.rsplit("f=", 1)[1]
token = scrapertools.find_single_match(data,
'<div class="download_container">.*?name="__RequestVerificationToken".*?value="([^"]+)"')
post = "fileId=%s&__RequestVerificationToken=%s" % (fileid, token)
headers = {'X-Requested-With': 'XMLHttpRequest'}
data = httptools.downloadpage(url, post, headers).data
data = jsontools.load(data)
mediaurl = data.get("DownloadUrl")
extension = data.get("Extension")
video_urls.append([".%s [%s]" % (extension, host_string), mediaurl])
for video_url in video_urls:
logger.info(" %s - %s" % (video_url[0], video_url[1]))
return video_urls
|
BuzzFeedNews/bikeshares | bikeshares/programs/nyc.py | Python | mit | 1,861 | 0.01021 | import bikeshares
import pandas as pd
import numpy as np
def convert_rider_gender(x):
if x == 0: return np.nan
if x == 1: return "M"
if x == 2: return "F"
raise Exception("Unrecognized gender variable: {0}".format(x))
def convert_rider_type(x):
if x == "Subscriber": return "member"
if x == "Customer": return "non-member"
raise Exception("Unrecognized rider type: {0}".format(x))
class CitiBike(bikeshares.program.BikeShareProgram):
def parse_trips(self, data_path):
parsed = pd.read_csv(data_path,
usecols=["starttime", "stoptime", "tripduration",
| "start station id", "end station id",
"bikeid", "usertype", "gender", "birth year" ],
parse_dates=["starttime", "stoptime"])
mapped = pd.DataFrame({
"start_time": parsed["starttime"],
"start_station": parsed["start station id"],
"end_time": parsed["stoptime"],
"end_station": parsed["end station id"],
" | duration": parsed["tripduration"],
"bike_id": parsed["bikeid"],
"rider_type": parsed["usertype"].apply(convert_rider_type),
"rider_gender": parsed["gender"].apply(convert_rider_gender),
"rider_birthyear": parsed["birth year"]
})
return mapped
def parse_stations(self, data_path):
parsed = pd.read_csv(data_path,
usecols=[ "start station id", "start station name",
"start station latitude", "start station longitude" ])
mapped = pd.DataFrame({
"id": parsed["start station id"],
"name": parsed["start station name"],
"lat": parsed["start station latitude"],
"lng": parsed["start station longitude"]
}).groupby("id").first().reset_index()
return mapped
|
JulianSchuette/android-instrumentation | injector/injector/apk.py | Python | apache-2.0 | 726 | 0.006887 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012, The Honeynet Project. All rights reserved.
# Author: Kun Yang <kelwya@gmail.com>
#
# APKIL is free software: you can redistribute it and/or modify it under
# the terms of version 3 of the GNU Lesser General Public License as
# published by the Free Software Foundation.
#
# APKIL is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR P | URPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with APKIL. If not, see <http://www.gnu.org/l | icenses/>.
|
vineethguna/heroku-buildpack-libsandbox | vendor/pygal-0.13.0/pygal/config.py | Python | mit | 8,580 | 0.000816 | # -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""
Config module with all options
"""
from copy import deepcopy
from pygal.style import Style, DefaultStyle
class FontSizes(object):
"""Container for font sizes"""
CONFIG_ITEMS = []
class Key(object):
_categories = []
def __init__(
self, default_value, type_, category, doc,
subdoc="", subtype=None):
self.value = default_value
self.type = type_
self.doc = doc
self.category = category
self.subdoc = subdoc
self.subtype = subtype
self.name = "Unbound"
if not category in self._categories:
self._categories.append(category)
CONFIG_ITEMS.append(self)
@property
def is_boolean(self):
return self.type == bool
@property
def is_numeric(self):
return self.type == int
@property
def is_string(self):
return self.type == str
@property
def is_list(self):
return self.type == list
def coerce(self, value):
if self.type == Style:
return value
elif self.type == list:
return self.type(
map(
self.subtype, map(
lambda x: x.strip(), value.split(','))))
return self.type(value)
class MetaConfig(type):
def __new__(mcs, classname, bases, classdict):
for k, v in classdict.items():
if isinstance(v, Key):
v.name = k
return type.__new__(mcs, classname, bases, classdict)
class Config(object):
"""Class holding config values"""
__metaclass__ = MetaConfig
style = Key(
DefaultStyle, Style, "Style", "Style holding values injected in css")
css = Key(
('style.css', 'graph.css'), list, "Style",
"List of css file",
"It can be an absolute file path or an external link",
str)
############ Look ############
title = Key(
None, str, "Look",
"Graph title.", "Leave it to None to disable title.")
width = Key(
800, int, "Look", "Graph width")
height = Key(
600, int, "Look", "Graph height")
show_dots = Key(True, bool, "Look", "Set to false to remove dots")
stroke = Key(
True, bool, "Look",
"Line dots (set it to false to get a scatter plot)")
| fill = Key(
False, bool, "Look", "Fill areas under lines")
show_legend = Key(
True, bool, "Look", "Set to false to remove legend")
legend_at_bottom = Key(
False, bool, "Look", "Set to true to position legend at bottom")
legend_box_size = Key(
12, int, "Look", "Size of legend boxes")
rounded_bars = Key(
None, int, "Look", "Set this to the desired radius in px")
############ Label ############
x_labels = Key(
None, list, "La | bel",
"X labels, must have same len than data.",
"Leave it to None to disable x labels display.",
str)
y_labels = Key(
None, list, "Label",
"You can specify explicit y labels",
"Must be a list of numbers", float)
x_label_rotation = Key(
0, int, "Label", "Specify x labels rotation angles", "in degrees")
y_label_rotation = Key(
0, int, "Label", "Specify y labels rotation angles", "in degrees")
############ Value ############
human_readable = Key(
False, bool, "Value", "Display values in human readable format",
"(ie: 12.4M)")
logarithmic = Key(
False, bool, "Value", "Display values in logarithmic scale")
interpolate = Key(
None, str, "Value", "Interpolation, this requires scipy module",
"May be any of 'linear', 'nearest', 'zero', 'slinear', 'quadratic,"
"'cubic', 'krogh', 'barycentric', 'univariate',"
"or an integer specifying the order"
"of the spline interpolator")
interpolation_precision = Key(
250, int, "Value", "Number of interpolated points between two values")
order_min = Key(
None, int, "Value", "Minimum order of scale, defaults to None")
range = Key(
None, list, "Value", "Explicitly specify min and max of values",
"(ie: (0, 100))", int)
include_x_axis = Key(
False, bool, "Value", "Always include x axis")
zero = Key(
0, int, "Value",
"Set the ordinate zero value",
"Useful for filling to another base than abscissa")
############ Text ############
no_data_text = Key(
"No data", str, "Text", "Text to display when no data is given")
label_font_size = Key(10, int, "Text", "Label font size")
value_font_size = Key(8, int, "Text", "Value font size")
tooltip_font_size = Key(20, int, "Text", "Tooltip font size")
title_font_size = Key(16, int, "Text", "Title font size")
legend_font_size = Key(14, int, "Text", "Legend font size")
no_data_font_size = Key(64, int, "Text", "No data text font size")
print_values = Key(
True, bool,
"Text", "Print values when graph is in non interactive mode")
print_zeroes = Key(
False, bool,
"Text", "Print zeroes when graph is in non interactive mode")
truncate_legend = Key(
None, int, "Text",
"Legend string length truncation threshold", "None = auto")
truncate_label = Key(
None, int, "Text",
"Label string length truncation threshold", "None = auto")
############ Misc ############
js = Key(
('https://raw.github.com/Kozea/pygal.js/master/svg.jquery.js',
'https://raw.github.com/Kozea/pygal.js/master/pygal-tooltips.js'),
list, "Misc", "List of js file",
"It can be a filepath or an external link",
str)
disable_xml_declaration = Key(
False, bool, "Misc",
"Don't write xml declaration and return str instead of string",
"usefull for writing output directly in html")
explicit_size = Key(
False, bool, "Misc", "Write width and height attributes")
pretty_print = Key(
False, bool, "Misc", "Pretty print the svg")
strict = Key(
False, bool, "Misc",
"If True don't try to adapt / filter wrong values")
def __init__(self, **kwargs):
"""Can be instanciated with config kwargs"""
for k in dir(self):
v = getattr(self, k)
if (k not in self.__dict__ and not
k.startswith('_') and not
hasattr(v, '__call__')):
if isinstance(v, Key):
v = v.value
setattr(self, k, v)
self.css = list(self.css)
self.js = list(self.js)
self._update(kwargs)
def __call__(self, **kwargs):
"""Can be updated with kwargs"""
self._update(kwargs)
def _update(self, kwargs):
self.__dict__.update(
dict([(k, v) for (k, v) in kwargs.items()
if not k.startswith('_') and k in dir(self)]))
def font_sizes(self, with_unit=True):
"""Getter for all font size configs"""
fs = FontSizes()
for name in dir(self):
if name.endswith('_font_size'):
setattr(
fs,
name.replace('_font_size', ''),
('%dpx' % getattr(self, name))
if with_unit else getattr(self, name))
return fs
def to_dict(self):
|
sirodoht/ting | API/chat/tests.py | Python | mit | 24,874 | 0.000804 | import time
import json
import datetime
import urllib
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from django_dynamic_fixture import G
from django.utils.dateformat import format
from .utils import datetime_to_timestamp, timestamp_to_datetime
from .models import Message, Channel
def create_message(text, timestamp, username, channel):
"""
Creates a message with the given text, datetime,
username, channel and with typing set to True.
"""
return Message.objects.create(
text=text,
datetime_start=timestamp_to_datetime(timestamp),
username=username,
typing=True,
channel=channel
)
class ChatClient(Client):
def delete(self, url, qstring):
return Client().delete(
url,
qstring,
content_type='application/x-www-form-urlencoded'
)
def patch(slef, url, qstring):
return Client().patch(
url,
qstring,
content_type='application/x-www-form-urlencoded'
)
class ChatTests(TestCase):
def setUp(self):
super(ChatTests, self).setUp()
self.channel = G(Channel, name='Channel')
class MessageViewPOSTTests(ChatTests):
def post_and_get_response(self, text, timestamp, username, typing):
"""
Posts a message on chat:message and returns the response
"""
return self.client.post(
reverse('chat:message', args=(self.channel.name,)),
{'text': text, 'username': username, 'datetime_start': timestamp, 'typing': typing}
)
def test_post_valid_message(self):
"""
When a valid message is sent, the view should
save the message in the database and return
the id of the message.
"""
timestamp = 10 ** 11
username = 'vitsalisa'
text = 'Message'
response = self.post_and_get_response(
text=text,
timestamp=timestamp,
username=username,
typing=True
)
messages = Message.objects.filter(username=username)
self.assertTrue(messages.exists())
self.assertEquals(len(messages), 1)
self.assertEqual(response.status_code, 200)
message = Message.objects.get(username=username);
self.assertEqual(int(response.content), message.id);
self.assertEqual(message.username, username);
self.assertTrue(message.typing)
self.assertEqual(message.text, text)
self.assertEqual(datetime_to_timestamp(message.datetime_start), timestamp)
def test_post_message_without_datetime_start(self):
"""
When a message is sent without a datetime_start the view
should produce an appropriate error and a 400(Bad Request)
status code. The message should not be saved.
"""
post_dict = {'text': 'Message', 'username': 'vitsalis', 'typing': True}
response = self.client.post(
reverse('chat:message', args=(self.channel.name,)),
post_dict
)
self.assertFalse(Message.objects.filter(username='vitsalis').exists())
self.assertEqual(response.status_code, 400)
def test_post_message_without_username(self):
"""
When a message is sent without a username the view
should produce an appropriate error and a 400(Bad Request)
status code. The message should not be saved.
"""
timestamp = 10 ** 11
post_dict = {'text': 'Message', 'datetime_start': timestamp, 'typing': True}
response = self.client.post(
reverse('chat:message', args=(self.channel.name,)),
post_dict
)
datetime_start_field = timestamp_to_datetime(timestamp)
self.assertFalse(Message.objects.filter(datetime_start=datetime_start_field).exists())
self.assertEqual(response.status_code, 400)
def test_post_message_with_invalid_channel_name(self):
"""
When a message is sent with an invalid channel name
the view should produce an appropriate error and a
404(Not Found) status code. The message should not be saved.
"""
timestamp = 10 ** 11
response = self.client.post(
reverse('chat:message', args=('invalid_channel',)),
{'text': 'Message', 'username': 'vitsalis', 'datetime_start': timestamp, 'typing': True}
)
self.assertFalse(Message.objects.filter(username='vitsalis').exists())
self.assertEqual(response.status_code, 404)
def test_post_message_without_text(self):
"""
When a message is sent without a channel_id the view
should produce an appropriate error and a 400(Bad Request)
status code. The message should not be saved.
"""
timestamp = 10 ** 11
post_dict = {'username': 'vitsalis', 'datetime_start': timestamp, 'typing': True}
response = self.client.post(
reverse('chat:message', args=(self.channel.name,)),
post_dict
)
self.assertFalse(Message.objects.filter(username='vitsalis').exists())
self.assertEqual(response.status_code, 400)
def test_post_message_with_invalid_datetime_start(self):
"""
When a message is sent with an invalid datetime the view
should produce an appropriate error and a 400(Bad Request)
status code. The message should not be saved.
"""
response = self.post_and_get_response(
text='Message',
timestamp='wtf',
username='vitsalis',
typing=True
)
self.assertFalse(Message.objects.filter(username='vitsalis').exists())
self.assertEqual(response.status_code, 400)
def test_post_message_with_future_datetime_start(self):
"""
When a message is sent with a future datetime the view
should change the datetime to the current one and save the message.
"""
timestamp = int(format(datetime.datetime.utcnow() + datetime.timedelta(days=1), 'U')) * 1000
response = self.post_and_get_response(
text='Message',
timestamp=timestamp,
username='vitsalis',
typing=True
)
messages = Message.objects.filter(username='vitsalis')
self.assertTrue(messages.exists())
self.assertEqual(len(messages), 1)
self.assertTrue(datetime_to_timestamp(messages[0].datetime_start) < timestamp)
self.assertEqual(response.status_code, 200)
self.assertEqual(int(response.content), messages[0].id)
def test_post_message_with_typing_false(self):
"""
When typing is False the view should save the message
and make its datetime_sent equal to datetime_start.
"""
timestamp = 10 ** 11
response = self.post_and_get_response(
text='Message',
timestamp=timestamp,
username='vitsalis',
typing=False
)
messages = Message.objects.filter(username='vitsalis')
self.assertTrue(messages.exists())
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].datetime_sent, messages[0].datetime_start)
class MessageViewGETTests(ChatTests):
def test_request_messages(self):
"""
When a valid request is sent the view should return
a JSON object containing messages. Each message should be
in the form {text: ...,username: ..., datetime: ...}.
The messages should be in chronological order(more recent first).
The number of objects is specified by the lim argument.
"""
lim = 2
timestamp = 10 ** 11
message1 = Message.objects.create(
text='Message1',
datetime_start=timestamp_to_da | tetime(timestamp),
datetime_sent=timestamp_to_datetime(timestamp + 10),
username='vitsalis',
typing=True,
channel=self.channel
| )
message2 = Message.objects.create(
text='Message2',
datetime_start=timestamp_to_datet |
vbursztyn/SegundoVoto | application/persistence.py | Python | agpl-3.0 | 1,800 | 0.035 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from urlparse import urlparse
import os
import pymongo
import json
class MongoPersistence():
def __init__(self, collection):
self.collectionName = collection
MONGO_URI = os.environ.get('MONGOLAB_URI')
if MONGO_URI:
self.client = pymongo.MongoClient(MONGO_URI)
self.db = self.client[urlparse(MONGO_URI).path[1:]]
else:
self.client = pymongo.MongoClient('localhost, 27017')
self.db = self.client['test' | ]
| def getInterface(self):
collection = self.db[self.collectionName]
results = list()
for result in collection.find():
results.append(result)
return results
def getResult(self, pType, pId, year, subject, position):
collection = self.db[self.collectionName]
results = dict()
for result in collection.find({ 'pType': pType, 'pId': pId, 'year': year, \
'subject': subject, 'position' : position }):
for company, values in result['results'].iteritems():
results[company] = { 'in_favor_count': int(values['in_favor_count']), \
'against_count': int(values['against_count']) }
return results
def getProjectDetails(self, pType, pId, year, subject, position):
collection = self.db[self.collectionName]
results = dict()
for result in collection.find({ 'pType': pType, 'pId': pId, 'year': year, \
'subject': subject, 'position' : position }):
for company, congressmen in result['details'].iteritems():
results[company] = congressmen
return json.dumps(results, indent=4)
def getDescription(self, pType, pId, year, subject):
collection = self.db[self.collectionName]
return json.dumps(collection.find_one({ 'pType': pType, 'pId': pId, 'year': year, \
'subject': subject })['description'])
def close(self):
self.client.close()
|
varunarya10/rally | tests/unit/benchmark/scenarios/quotas/test_utils.py | Python | apache-2.0 | 4,630 | 0 | # Copyright 2014: Kylin Cloud
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from rally.benchmark.scenarios.quotas import utils
from tests.unit import fakes
from tests.unit import test
class QuotasScenarioTestCase(test.TestCase):
def setUp(self):
super(QuotasScenarioTestCase, self).setUp()
def test__update_quotas(self):
tenant_id = "fake_tenant"
quotas = {
"metadata_items": 10,
"key_pairs": 10,
"injected_file_content_bytes": 1024,
"injected_file_path_bytes": 1024,
"ram": 5120,
"instances": 10,
"injected_files": 10,
"cores": 10,
}
fake_nova = fakes.FakeNovaClient()
fake_nova.quotas.update = mock.MagicMock(return_value=quotas)
fake_clients = fakes.FakeClients()
fake_clients._nova = fake_nova
scenario = utils.QuotasScenario(admin_clients=fake_clients)
scenario._generate_quota_values = mock.MagicMock(return_value=quotas)
result = scenario._update_quotas("nova", tenant_id)
self.assertEqual(quotas, result)
fake_nova.quotas.update.assert_called_once_with(tenant_id, **quotas)
self._test_atomic_action_timer(scenario.atomic_actions(),
"quotas.update_quotas")
def test__update_quotas_fn(self):
tenant_id = "fake_tenant"
quotas = {
"metadata_items": 10,
"key_pairs": 10,
"injected_file_content_bytes": 1024,
"injected_file_path_bytes": 1024,
"ram": 5120,
"instances": 10,
"injected_files": 10,
"cores": 10,
}
fake_nova = fakes.FakeNovaClient()
fake_nova.quotas.update = mock.MagicMock(r | eturn_value=quotas)
fake_clients = fakes.FakeClients()
fake_clients._nova = fake_nova
scenario = utils.QuotasScenario(admin_clients=fake_clients)
scenario._generate_quota_values = mock.MagicMock(return_value=quotas)
mock_quota = mock.Mock(return_value=quotas)
result = scenario._update_quotas("nova", tenant_id,
| quota_update_fn=mock_quota)
self.assertEqual(quotas, result)
self._test_atomic_action_timer(scenario.atomic_actions(),
"quotas.update_quotas")
def test__generate_quota_values_nova(self):
max_quota = 1024
scenario = utils.QuotasScenario(admin_clients=fakes.FakeClients())
quotas = scenario._generate_quota_values(max_quota, "nova")
for k, v in six.iteritems(quotas):
self.assertTrue(-1 <= v <= max_quota)
def test__generate_quota_values_cinder(self):
max_quota = 1024
scenario = utils.QuotasScenario(admin_clients=fakes.FakeClients())
quotas = scenario._generate_quota_values(max_quota, "cinder")
for k, v in six.iteritems(quotas):
self.assertTrue(-1 <= v <= max_quota)
def test__generate_quota_values_neutron(self):
max_quota = 1024
scenario = utils.QuotasScenario(admin_clients=fakes.FakeClients())
quotas = scenario._generate_quota_values(max_quota, "neutron")
for v in six.itervalues(quotas):
for v1 in six.itervalues(v):
for v2 in six.itervalues(v1):
self.assertTrue(-1 <= v2 <= max_quota)
def test__delete_quotas(self):
tenant_id = "fake_tenant"
fake_nova = fakes.FakeNovaClient()
fake_nova.quotas.delete = mock.MagicMock()
fake_clients = fakes.FakeClients()
fake_clients._nova = fake_nova
scenario = utils.QuotasScenario(admin_clients=fake_clients)
scenario._delete_quotas("nova", tenant_id)
fake_nova.quotas.delete.assert_called_once_with(tenant_id)
self._test_atomic_action_timer(scenario.atomic_actions(),
"quotas.delete_quotas")
|
bram85/topydo | test/facilities.py | Python | gpl-3.0 | 1,802 | 0 | # Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <bram@topydo.org>
#
# This program is free software: you can redistribute it and/or modify
# it under | the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR | A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from topydo.lib.printers.PrettyPrinter import PrettyPrinter
from topydo.lib.Todo import Todo
from topydo.lib.TodoFile import TodoFile
from topydo.lib.TodoList import TodoList
def load_file(p_filename):
"""
Loads a todo file from the given filename and returns a list of todos.
"""
todolist = load_file_to_raw_list(p_filename)
return [Todo(src) for src in todolist]
def load_file_to_raw_list(p_filename):
"""
Loads a todo file from the given filename and returns a list of todo
strings (unparsed).
"""
todofile = TodoFile(p_filename)
return todofile.read()
def load_file_to_todolist(p_filename):
"""
Loads a todo file to a TodoList instance.
"""
todolist = load_file_to_raw_list(p_filename)
return TodoList(todolist)
def todolist_to_string(p_list):
""" Converts a todo list to a single string. """
return '\n'.join([t.source() for t in p_list])
def print_view(p_view):
printer = PrettyPrinter()
return "\n".join([str(s) for s in printer.print_list(p_view.todos)])
|
OAButton/tricorder | plugins/python/metaheaders.py | Python | bsd-3-clause | 2,791 | 0.039412 | #!/usr/bin/env python2.6
import lxml.html, re, HTMLParser
class InvalidArguments(Exception):
pass
class MetaHeaders:
def __init__(self, url=None, page=None,name='name',content='content', unescape_entities=False):
if page:
self.root = lxml.html.document_fromstring(page)
elif url:
self.root = lxml.html.parse(url).getroot()
else:
raise InvalidArguments, "Need a URL or an HTML page"
meta = {}
# Some sites (IEEE) triple escape entities, e.g., R&amp;#x0026;D
if unescape_entities:
htmldecoder = HTMLParser.HTMLParser()
for m in self.root.cssselect("meta"):
attr=m.attrib
if attr.has_key(name) and attr.has_key(content) and attr[content] != "":
k = attr[name]
v = attr[content].strip()
if unescape_entities:
v = htmldecoder.unescape(htmldecoder.unescape(v))
if not meta.has_key(k):
meta[k] = []
meta[k].append(v)
self.meta = meta
def get_item(self, k):
items = self.get_multi_item(k)
if items:
return items[0]
else:
return None
def get_multi_item(self, k):
if self.meta.has_key(k):
return self.meta[k]
else:
return None
def print_item(self, entry, key):
el = self.get_multi_item(key)
if not el:
return
for e in el:
print "%s\t%s" % (entry, e)
def print_date(self, key):
date = self.get_item(key)
if not date:
return
year = None
month = None
day = None
m = re.search(r'(\d\d\d\d)(?:[-/])(\d+)(?:[-/])(\d+)', date)
if m:
year = m.group(1)
month = m.group(2)
day = m.group(3)
if not year:
m = re.search(r'(\d\d\d\d)(?:[-/])(\d+)', date)
if m:
year = m.group(1)
month = m.group(2)
if not year:
m = re.search(r'(\d\d\d\d)', date)
if m:
year = m.group(1)
m = re.search(r"([a-z]+)", date, re.IGNORECASE)
if m:
months = {
'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12,
'January': 1, 'February': 2, 'March': 3, 'April': 4,
'May': 5, 'June': 6, 'July': 7, 'August': 8,
'September': 9, 'October': 10, 'November': 11, 'December': 12
}
try:
print "month\t%s" % months[m.group(1).capitalize()]
month = None
except:
pass
if year:
print "year\t%s" % year
if month:
print "month\t%s" % month
if day:
print "day\t%s" % day
def test():
url = "http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=4755987"
print "getting %s " % url
me | taheaders = MetaHeaders(url=url)
for (k,v) in metaheaders.meta.items():
print "%s = %s" % (k,v)
print "===============\nRepeat with manual fetch"
from urllib2 import urlopen
page = urlopen(url).read()
metaheaders = | MetaHeaders(page=page)
for (k,v) in metaheaders.meta.items():
print "%s = %s" % (k,v)
if __name__ == '__main__':
test()
|
yuanlisky/linlp | linlp/algorithm/viterbiMat/prob_start_organization.py | Python | apache-2.0 | 282 | 0.056738 | prob_start = {
'P': -3.14e+100,
'B': -3.14e+100,
'M': -3.14e+100,
'S': 0.0,
'X | ': -3.14e+100,
'L': -3.14e+100,
'F': -3.14e+100,
| 'W': -3.14e+100,
'D': -3.14e+100,
'G': -3.14e+100,
'K': -3.14e+100,
'I': -3.14e+100,
'A': -3.14e+100,
'Z': -3.14e+100,
'J': -3.14e+100,
'C': -3.14e+100,
}
|
cloudbase/neutron | neutron/agent/windows/utils.py | Python | apache-2.0 | 3,141 | 0 | # Copyright 2015 Cloudbase Solutions.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from eventlet.green import subprocess
from eventlet import greenthread
from neutron_lib.utils import helpers
from oslo_log import log as logging
from oslo_utils import encodeutils
from neutron._i18n import _
from neutron.common import utils
LOG = logging.getLogger(__name__)
def create_process(cmd, addl_env=None):
cmd = list(map(str, cmd))
LOG.debug("Running command: %s", cmd)
env = os.environ.copy()
if addl_env:
env.update(addl_env)
obj = utils.subprocess_popen(cmd, shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
preexec_fn=None,
close_fds=False)
return obj, cmd
def execute(cmd, process_input=None, addl_env=None,
check_exit_code=True, return_stderr=False, log_fail_as_error=True,
extra_ok_codes=None, run_as_root=False, do_decode=True):
try:
if process_input is not None:
_process_input = encodeutils.to_utf8(process_input)
else:
_process_input = None
obj, cmd = create_process(cmd, addl_env=addl_env)
_stdout, _stderr = obj.communicate(_process_input)
obj.stdin.close()
_stdout = helpers.safe_decode_utf8(_stdout)
_stderr = helpers.safe_decode_utf8(_stderr)
m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdin: %(stdin)s\n"
"Stdout: %(stdout)s\nStderr: %(stderr)s") % \
{'cmd': cmd,
'code': obj.returncode,
'stdin': process_input or '',
'stdout': _stdout,
'stderr': _stderr}
extra_ok_codes = extra_ok_codes or []
if obj.returncode and obj.returncode in extra_ok_codes:
obj.returncode = None
log_msg = m.strip().replace('\n', '; ')
if obj.returncode and log_fail_as_error:
LOG.error(log_msg)
else:
LOG.debug(log_msg)
if obj.returncode and c | heck_exit_code:
raise RuntimeError(m)
finally:
# NOTE(termie): this appears to be necessary to let the | subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
return (_stdout, _stderr) if return_stderr else _stdout
|
UITools/saleor | saleor/product/migrations/0080_auto_20181214_0440.py | Python | bsd-3-clause | 616 | 0 | # Generated by Django 2.1.3 on 2018-12-14 10:40
from django.db import migrations, models
clas | s Migration(migrations.Migration):
|
dependencies = [
('product', '0079_default_tax_rate_instead_of_empty_field'),
]
operations = [
migrations.AddField(
model_name='category',
name='background_image_alt',
field=models.CharField(blank=True, max_length=128),
),
migrations.AddField(
model_name='collection',
name='background_image_alt',
field=models.CharField(blank=True, max_length=128),
),
]
|
shiquanwang/numba | numba/tests/test_filter2d.py | Python | bsd-2-clause | 1,782 | 0.005612 | #! /usr/bin/env python
# ______________________________________________________________________
'''test_filter2d
Test the filter2d() example from the PyCon'12 slide deck.
'''
# ______________________________________________________________________
import numpy
from numba import *
from numba.decorators import jit
import sys
import unittest
# ______________________________________________________________________
def filter2d(image, filt):
M, N = image.shape
Mf, Nf = filt.shape
Mf2 = Mf // 2
Nf2 = Nf // 2
result = numpy.zeros_like(image)
for i in range(Mf2, M - Mf2):
for j in range(Nf2, N - Nf2):
num = 0.0
for ii in range(Mf):
for jj in range(Nf):
num += (filt[Mf-1-ii, Nf-1-jj] * image[i-Mf2+ii, j-Nf2+jj])
result[i, j] = num
return result
# ______________________________________________________________________
class Te | stFilter2d(unittest.TestCase):
def test_vectorized_filter2d(self):
ufilter2d = jit(argtypes=[double[:,:], double[:,:]],
restype=double[:,:])(filter2d)
image = numpy.random.random(( | 50, 50))
filt = numpy.random.random((5, 5))
filt /= filt.sum()
plain_old_result = filter2d(image, filt)
hot_new_result = ufilter2d(image, filt)
self.assertTrue((abs(plain_old_result - hot_new_result) < 1e-9).all())
# ______________________________________________________________________
@autojit
def func():
return numpy.empty(10)
if __name__ == "__main__":
# func()
# TestFilter2d('test_vectorized_filter2d').debug()
unittest.main(*sys.argv[1:])
# ______________________________________________________________________
# End of test_filter2d.py
|
vbkaisetsu/clopure | clopure/exceptions.py | Python | mit | 273 | 0 | c | lass ClopureSyntaxError(Exception):
def __init__(self, *args, pos=0, **kwargs):
super().__init__(*args, **kwargs)
self.pos = pos
class ClopureRuntimeError(Exception):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
| |
LethusTI/supportcenter | vendor/django/tests/regressiontests/admin_inlines/admin.py | Python | gpl-3.0 | 2,914 | 0.001716 | from django.contrib import admin
from django import forms
from models import *
site = admin.AdminSite(name="admin")
class BookInline(admin.TabularInline):
model = Author.books.through
class AuthorAdmin(admin.ModelAdmin):
inlines = [BookInline]
class InnerInline(admin.StackedInline):
model = Inner
can_delete = False
readonly_fields = ('readonly',) # For bug #13174 tests.
class HolderAdmin(admin.ModelAdmin):
class Media:
js = ('my_awesome_admin_scripts.js',)
class InnerInline2(admin.StackedInline):
model = Inner2
class Media:
js = ('my_awesome_inline_scripts.js',)
class InnerInline3(admin.StackedInline):
model = Inner3
class Media:
js = ('my_awesome_inline_scripts.js',)
class TitleForm(form | s.ModelForm):
def clean(self):
cleaned_data = self.cleaned_data
title1 = cleaned_data.get("title1")
title2 = cleaned_data.get("title2")
if title1 != title2:
raise forms.ValidationError("The two titles must be the same")
return cleaned_data
class TitleInline(admin.TabularInline):
model = Title
form = TitleForm
extra = 1
class Inner4Stac | kedInline(admin.StackedInline):
model = Inner4Stacked
class Inner4TabularInline(admin.TabularInline):
model = Inner4Tabular
class Holder4Admin(admin.ModelAdmin):
inlines = [Inner4StackedInline, Inner4TabularInline]
class InlineWeakness(admin.TabularInline):
model = ShoppingWeakness
extra = 1
class QuestionInline(admin.TabularInline):
model = Question
readonly_fields=['call_me']
def call_me(self, obj):
return 'Callable in QuestionInline'
class PollAdmin(admin.ModelAdmin):
inlines = [QuestionInline]
def call_me(self, obj):
return 'Callable in PollAdmin'
class ChapterInline(admin.TabularInline):
model = Chapter
readonly_fields=['call_me']
def call_me(self, obj):
return 'Callable in ChapterInline'
class NovelAdmin(admin.ModelAdmin):
inlines = [ChapterInline]
class ConsigliereInline(admin.TabularInline):
model = Consigliere
class SottoCapoInline(admin.TabularInline):
model = SottoCapo
class ProfileInline(admin.TabularInline):
model = Profile
extra = 1
site.register(TitleCollection, inlines=[TitleInline])
# Test bug #12561 and #12778
# only ModelAdmin media
site.register(Holder, HolderAdmin, inlines=[InnerInline])
# ModelAdmin and Inline media
site.register(Holder2, HolderAdmin, inlines=[InnerInline2])
# only Inline media
site.register(Holder3, inlines=[InnerInline3])
site.register(Poll, PollAdmin)
site.register(Novel, NovelAdmin)
site.register(Fashionista, inlines=[InlineWeakness])
site.register(Holder4, Holder4Admin)
site.register(Author, AuthorAdmin)
site.register(CapoFamiglia, inlines=[ConsigliereInline, SottoCapoInline])
site.register(ProfileCollection, inlines=[ProfileInline]) |
paul30001/pikapy1 | pikapy/ptcexceptions.py | Python | gpl-3.0 | 658 | 0 | __all__ = [
'PTCException',
'PTCInvalidStatusCodeException',
'PTCInvalidNameException',
'PTCInvalidEmailException',
'PTCInvalidPasswordException',
]
class PTCException(Exception):
| """Base exception for all PTC Account exceptions"""
pass
class PTCInvalidStatusCodeException(Exception):
"""Base exception for all PTC Account exceptions"""
pass
class PTCInvalidNameException(PTCException):
"""Username already in use"""
pass
class PTCInvalidEmailException(PTCException):
"""Email | invalid or already in use"""
pass
class PTCInvalidPasswordException(PTCException):
"""Password invalid"""
pass
|
QiJune/Paddle | python/paddle/fluid/tests/unittests/dist_transformer.py | Python | apache-2.0 | 63,417 | 0.000347 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import argparse
import time
import math
import os
import sys
import six
import argparse
import ast
import multiprocessing
import time
from functools import partial
from os.path import expanduser
import glob
import random
import tarfile
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
from test_dist_base import TestDistRunnerBase, runtime_main
import paddle.compat as cpt
from paddle.compat import long_type
import hashlib
from paddle.fluid.transp | iler.details import program_to_code
const_para_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant(0.001))
const_bias_attr = const_para_a | ttr
# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
#from transformer_config import ModelHyperParams, TrainTaskConfig, merge_cfg_from_list
class TrainTaskConfig(object):
# only support GPU currently
use_gpu = True
# the epoch number to train.
pass_num = 1
# the number of sequences contained in a mini-batch.
# deprecated, set batch_size in args.
batch_size = 20
# the hyper parameters for Adam optimizer.
# This static learning_rate will be multiplied to the LearningRateScheduler
# derived learning rate the to get the final learning rate.
learning_rate = 1
beta1 = 0.9
beta2 = 0.98
eps = 1e-9
# the parameters for learning rate scheduling.
warmup_steps = 4000
# the weight used to mix up the ground-truth distribution and the fixed
# uniform distribution in label smoothing when training.
# Set this as zero if label smoothing is not wanted.
label_smooth_eps = 0.1
# the directory for saving trained models.
model_dir = "trained_models"
# the directory for saving checkpoints.
ckpt_dir = "trained_ckpts"
# the directory for loading checkpoint.
# If provided, continue training from the checkpoint.
ckpt_path = None
# the parameter to initialize the learning rate scheduler.
# It should be provided if use checkpoints, since the checkpoint doesn't
# include the training step counter currently.
start_step = 0
check_acc = True
data_path = expanduser("~") + (
"/.cache/paddle/dataset/test_dist_transformer/")
src_vocab_fpath = data_path + "vocab.bpe.32000"
trg_vocab_fpath = data_path + "vocab.bpe.32000"
train_file_pattern = data_path + "train.tok.clean.bpe.32000.en-de"
val_file_pattern = data_path + "newstest2013.tok.bpe.32000.en-de"
pool_size = 2000
sort_type = None
local = True
shuffle = False
shuffle_batch = False
special_token = ['<s>', '<e>', '<unk>']
token_delimiter = ' '
use_token_batch = False
class InferTaskConfig(object):
use_gpu = True
# the number of examples in one run for sequence generation.
batch_size = 10
# the parameters for beam search.
beam_size = 5
max_out_len = 256
# the number of decoded sentences to output.
n_best = 1
# the flags indicating whether to output the special tokens.
output_bos = False
output_eos = False
output_unk = True
# the directory for loading the trained model.
model_path = "trained_models/pass_1.infer.model"
class ModelHyperParams(object):
# These following five vocabularies related configurations will be set
# automatically according to the passed vocabulary path and special tokens.
# size of source word dictionary.
src_vocab_size = 10000
# size of target word dictionay
trg_vocab_size = 10000
# index for <bos> token
bos_idx = 0
# index for <eos> token
eos_idx = 1
# index for <unk> token
unk_idx = 2
# max length of sequences deciding the size of position encoding table.
# Start from 1 and count start and end tokens in.
max_length = 256
# the dimension for word embeddings, which is also the last dimension of
# the input and output of multi-head attention, position-wise feed-forward
# networks, encoder and decoder.
d_model = 512
# size of the hidden layer in position-wise feed-forward networks.
d_inner_hid = 2048
# the dimension that keys are projected to for dot-product attention.
d_key = 64
# the dimension that values are projected to for dot-product attention.
d_value = 64
# number of head used in multi-head attention.
n_head = 8
# number of sub-layers to be stacked in the encoder and decoder.
n_layer = 6
# dropout rate used by all dropout layers.
dropout = 0.0 # no random
# random seed used in dropout for CE.
dropout_seed = None
# the flag indicating whether to share embedding and softmax weights.
# vocabularies in source and target should be same for weight sharing.
weight_sharing = True
def merge_cfg_from_list(cfg_list, g_cfgs):
"""
Set the above global configurations using the cfg_list.
"""
assert len(cfg_list) % 2 == 0
for key, value in zip(cfg_list[0::2], cfg_list[1::2]):
for g_cfg in g_cfgs:
if hasattr(g_cfg, key):
try:
value = eval(value)
except Exception: # for file path
pass
setattr(g_cfg, key, value)
break
# The placeholder for batch_size in compile time. Must be -1 currently to be
# consistent with some ops' infer-shape output in compile time, such as the
# sequence_expand op used in beamsearch decoder.
batch_size = -1
# The placeholder for squence length in compile time.
seq_len = ModelHyperParams.max_length
# Here list the data shapes and data types of all inputs.
# The shapes here act as placeholder and are set to pass the infer-shape in
# compile time.
input_descs = {
# The actual data shape of src_word is:
# [batch_size * max_src_len_in_batch, 1]
"src_word": [(batch_size, seq_len, long_type(1)), "int64", 2],
# The actual data shape of src_pos is:
# [batch_size * max_src_len_in_batch, 1]
"src_pos": [(batch_size, seq_len, long_type(1)), "int64"],
# This input is used to remove attention weights on paddings in the
# encoder.
# The actual data shape of src_slf_attn_bias is:
# [batch_size, n_head, max_src_len_in_batch, max_src_len_in_batch]
"src_slf_attn_bias": [(batch_size, ModelHyperParams.n_head, seq_len,
seq_len), "float32"],
# The actual data shape of trg_word is:
# [batch_size * max_trg_len_in_batch, 1]
"trg_word": [(batch_size, seq_len, long_type(1)), "int64",
2], # lod_level is only used in fast decoder.
# The actual data shape of trg_pos is:
# [batch_size * max_trg_len_in_batch, 1]
"trg_pos": [(batch_size, seq_len, long_type(1)), "int64"],
# This input is used to remove attention weights on paddings and
# subsequent words in the decoder.
# The actual data shape of trg_slf_attn_bias is:
# [batch_size, n_head, max_trg_len_in_batch, max_trg_len_in_batch]
"trg_slf_attn_bias": [(batch_size, ModelHyperParams.n_head, seq_len,
seq_len), "float32"],
# This input is used to remove attention weights on paddings of the source
# input in the encoder-decoder attention.
# The actual data shape of trg_src_attn_bias is:
# [batch_size, n_head, max_trg_len_in_batch, max_src_len_in_batch]
"trg_src_attn_bias": [(batch_size, ModelHyperParams.n_head, seq_len,
seq_len), "float32"],
# This input |
bingshuika/hearthbreaker-new | hearthbreaker/cards/weapons/__init__.py | Python | mit | 594 | 0 | from hearthbreaker.cards.weapons.hunter import (
EaglehornBow,
GladiatorsLongbow,
| Glaivezooka,
)
from hearthbreaker.cards.weapons.paladin import (
LightsJustice,
SwordOfJustice,
TruesilverChampion,
Coghammer,
)
from hearthbreaker.cards.weapons.rogue import (
AssassinsBlade,
PerditionsBlade,
CogmastersWrench,
)
from hearthbreaker.cards.weapons.shaman import (
Doomhammer,
StormforgedAxe,
Powermace,
)
from hearthbreaker.cards.weapons.warrior import (
FieryWarAxe,
ArcaniteReaper,
Gorehowl,
DeathsBite,
Ogre | Warmaul,
)
|
efiring/numpy-work | numpy/core/numerictypes.py | Python | bsd-3-clause | 20,785 | 0.003464 | """numerictypes: Define the numeric type objects
This module is designed so 'from numerictypes import *' is safe.
Exported symbols include:
Dictionary with all registered number types (including aliases):
typeDict
Type objects (not all will be available, depends on platform):
see variable sctypes for which ones you have
Bit-width names
int8 int16 int32 int64 int128
uint8 uint16 uint32 uint64 uint128
float16 float32 float64 float96 float128 float256
complex32 complex64 complex128 complex192 complex256 complex512
c-based names
bool_
object_
void, str_, unicode_
byte, ubyte,
short, ushort
intc, uintc,
intp, uintp,
int_, uint,
longlong, ulonglong,
single, csingle,
float_, complex_,
longfloat, clongfloat,
As part of the type-hierarchy: xx -- is bit-width
generic
+-> bool_ (kind=b)
+-> number (kind=i)
| integer
| signedinteger (intxx)
| byte
| short
| intc
| intp int0
| int_
| longlong
+-> unsignedinteger (uintxx) (kind=u)
| ubyte
| ushort
| uintc
| uintp uint0
| uint_
| ulonglong
+-> inexact
| +-> floating (floatxx) (kind=f)
| | single
| | float_ (double)
| | longfloat
| \-> complexfloating (complexxx) (kind=c)
| csingle (singlecomplex)
| complex_ (cfloat, cdouble)
| clongfloat (longcomplex)
+-> flexible
| character
| str_ (string_) (kind=S)
| unicode_ (kind=U)
| void (kind=V)
|
\-> object_ (not used much) (kind=O)
"""
# we add more at the bottom
__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
'maximum_sctype', 'issctype', 'typecodes', 'find_common_type']
from numpy.core.multiarray import typeinfo, ndarray, array, empty, dtype
import types as _types
# we don't export these for import *, but we do want them accessible
# as numerictypes.bool, etc.
from __builtin__ import bool, int, long, float, complex, object, unicode, str
# String-handling utilities to avoid locale-dependence.
# "import string" is costly to import!
# Construct the translation tables directly
# "A" = chr(65), "a" = chr(97)
# Build the 256-entry case-translation tables by hand; `6 | 5` garbling had
# turned the 65 ('A') boundary into a bitwise-or (7), corrupting LOWER_TABLE.
# list() keeps the slices valid on Python 3, where map() returns an iterator;
# on Python 2 it is a no-op (map() already returns a list).
_all_chars = list(map(chr, range(256)))
_ascii_upper = _all_chars[65:65+26]
_ascii_lower = _all_chars[97:97+26]
LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
#import string
# assert (string.maketrans(string.ascii_uppercase, string.ascii_lowercase) == \
#    LOWER_TABLE)
# assert (string.maketrans(string.ascii_lowercase, string.ascii_uppercase) == \
#    UPPER_TABLE)
#LOWER_TABLE = string.maketrans(string.ascii_uppercase, string.ascii_lowercase)
#UPPER_TABLE = string.maketrans(string.ascii_lowercase, string.ascii_uppercase)
def english_lower(s):
    """Return *s* with ASCII uppercase letters mapped to lowercase.

    Locale-independent replacement for str.lower(): some locales change
    the meaning of case conversion (e.g. Turkish has dotted and dotless
    variants of "I", so "I".lower() != "i" in a "tr" locale), and this
    helper deliberately ignores them.

    Parameters
    ----------
    s : str

    Returns
    -------
    lowered : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_lower
    >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
    >>> english_lower('')
    ''
    """
    return s.translate(LOWER_TABLE)
def english_upper(s):
    """Return *s* with ASCII lowercase letters mapped to uppercase.

    Locale-independent replacement for str.upper(): some locales change
    the meaning of case conversion (e.g. Turkish dotted/dotless "I",
    where "i".upper() != "I" in a "tr" locale), and this helper
    deliberately ignores them.

    Parameters
    ----------
    s : str

    Returns
    -------
    uppered : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_upper
    >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
    >>> english_upper('')
    ''
    """
    return s.translate(UPPER_TABLE)
def english_capitalize(s):
    """Return *s* with its first character uppercased via English rules.

    Locale-independent replacement for str.capitalize(); the empty
    string is returned unchanged.

    Parameters
    ----------
    s : str

    Returns
    -------
    capitalized : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_capitalize
    >>> english_capitalize('int8')
    'Int8'
    >>> english_capitalize('Int8')
    'Int8'
    >>> english_capitalize('')
    ''
    """
    if not s:
        return s
    return english_upper(s[0]) + s[1:]
sctypeDict = {}      # Contains all leaf-node scalar types with aliases
sctypeNA = {}        # Contains all leaf-node types -> numarray type equivalences
allTypes = {}        # Collect the types we will add to the module here
def _evalname(name):
    """Split a type name into its alphabetic base and trailing bit count.

    'int8' -> ('int', 8); names with no trailing digits get bits == 0,
    e.g. 'float' -> ('float', 0).
    """
    k = 0
    while k < len(name) and name[k] not in '0123456789':
        k += 1
    try:
        bits = int(name[k:])
    except ValueError:
        bits = 0
    return name[:k], bits
def bitname(obj):
    """Return a (base, bits, char) bit-width description for a type object.

    First consults the typeinfo table (keyed by the upper-cased name with
    any trailing underscore removed); if the name is not there, it is
    parsed as a bit-width name via _evalname.  A handful of special
    names (bool_, string_, unicode_, void, object_) get fixed type-char
    codes, and char is suffixed with the byte count when both are known.
    """
    name = obj.__name__
    base = ''
    char = ''
    try:
        newname = name[:-1] if name[-1] == '_' else name
        info = typeinfo[english_upper(newname)]
        assert(info[-1] == obj)  # sanity check
        bits = info[2]
    except KeyError:     # bit-width name
        base, bits = _evalname(name)
        char = base[0]
    special = {'bool_': ('b', 'bool'),
               'string_': ('S', 'string'),
               'unicode_': ('U', 'unicode'),
               'void': ('V', 'void'),
               'object_': ('O', 'object')}
    if name in special:
        char, base = special[name]
        if name == 'object_':
            bits = 0
    nbytes = bits / 8
    if char != '' and nbytes != 0:
        char = "%s%d" % (char, nbytes)
    return base, bits, char
def _add_types():
    """Populate allTypes/sctypeDict from the multiarray typeinfo table.

    Tuple entries are concrete scalar types: register them under the
    lower-cased name plus their typenum and typechar aliases.  Non-tuple
    entries are generic (abstract) classes and only go into allTypes.
    """
    for key, entry in typeinfo.items():
        name = english_lower(key)
        if isinstance(entry, tuple):
            typeobj = entry[-1]
            # C-name entry plus typenum and typechar references.
            allTypes[name] = typeobj
            sctypeDict[name] = typeobj
            sctypeDict[entry[0]] = typeobj
            sctypeDict[entry[1]] = typeobj
        else:
            allTypes[name] = entry
_add_types()
def _add_aliases():
for a in typeinfo.keys():
name = english_lower(a)
if not isinstance(typeinfo[a], tuple):
continue
typeobj = typeinfo[a][-1]
# insert bit-width version for this class (if relevant)
base, bit, char = bitname(typeobj)
if base[-3:] == 'int' or char[0] in 'ui': continue
if base != '':
myname = "%s%d" % (base, bit)
if (name != 'longdouble' and name != 'clongdouble') or \
myname not in allTypes.keys():
allTypes[myname] = typeobj
sctypeDict[myname] = typeobj
|
lingcheng99/LeetCode | AddBinary.py | Python | mit | 327 | 0.015291 | """
Add Binary
Given two binary strings, return their sum (also a binary string).
For example,
a = "11"
b = "1"
Return "100".
"""
class Solution(object):
    def addBinary(self, a, b):
        """Return the sum of two binary strings as a binary string.

        :type a: str
        :type b: str
        :rtype: str
        """
        # Parse both operands as base-2 integers, add them, and strip
        # the '0b' prefix that bin() puts on its output.
        return bin(int(a, base=2) + int(b, base=2))[2:]
|
ToonTownInfiniteRepo/ToontownInfinite | toontown/building/DistributedBuildingMgrAI.py | Python | mit | 9,120 | 0.001535 | from direct.directnotify.DirectNotifyGlobal import *
from otp.ai.AIBaseGlobal import *
from toontown.building import DistributedBuildingAI
from toontown.building import GagshopBuildingAI
from toontown.building import HQBuildingAI
from toontown.building import KartShopBuildingAI
from toontown.building import PetshopBuildingAI
from toontown.hood import ZoneUtil
# from toontown.building import DistributedAnimBuildingAI
class DistributedBuildingMgrAI:
    # Category logger shared by all instances of this manager.
    notify = directNotify.newCategory('DistributedBuildingMgrAI')

    def __init__(self, air, branchId, dnaStore, trophyMgr):
        """Build the manager for one street branch and instantiate its buildings.

        air: the AI repository; branchId: zone id of this street branch;
        dnaStore: DNA storage describing the branch; trophyMgr: passed
        through to the buildings created in newBuilding().
        """
        self.air = air
        self.branchId = branchId
        self.canonicalBranchId = ZoneUtil.getCanonicalZoneId(self.branchId)
        self.dnaStore = dnaStore
        self.trophyMgr = trophyMgr
        # blockNumber -> building AI object; populated below.
        self.__buildings = {}
        self.findAllLandmarkBuildings()
def cleanup(self):
for building in self.__buildings.values():
building.cleanup()
self.__buildings = {}
    def isValidBlockNumber(self, blockNumber):
        """Return True if blockNumber refers to a managed landmark building."""
        return blockNumber in self.__buildings
def isSuitBlock(self, blockNumber):
if not self.isValidBlockNumber(blockNumber):
return False
return self.__buildings[blockNumber].isSuitBlock()
def getSuitBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if building.isSuitBlock():
blocks.append(blockNumber)
return blocks
def getEstablishedSuitBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if building.isEstablishedSuitBlock():
blocks.append(blockNumber)
return blocks
def getToonBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if isinstance(building, HQBuildingAI.HQBuildingAI):
continue
if isinstance(building, GagshopBuildingAI.GagshopBuildingAI):
continue
if isinstance(building, PetshopBuildingAI.PetshopBuildingAI):
continue
if isinstance(building, KartShopBuildingAI.KartShopBuildingAI):
continue
if not building.isSuitBlock():
blocks.append(blockNumber)
return blocks
    def getBuildings(self):
        """Return all managed building AI objects."""
        return self.__buildings.values()
    def getFrontDoorPoint(self, blockNumber):
        """Return the block's front-door point, or None for an unknown block."""
        if self.isValidBlockNumber(blockNumber):
            return self.__buildings[blockNumber].getFrontDoorPoint()
    def getBuildingTrack(self, blockNumber):
        """Return the building's suit track code (e.g. 'c'), or None if unknown."""
        if self.isValidBlockNumber(blockNumber):
            return self.__buildings[blockNumber].track
    def getBuilding(self, blockNumber):
        """Return the building AI for the block, or None if unknown."""
        if self.isValidBlockNumber(blockNumber):
            return self.__buildings[blockNumber]
    def setFrontDoorPoint(self, blockNumber, point):
        """Set the block's front-door point; no-op (None) for unknown blocks."""
        if self.isValidBlockNumber(blockNumber):
            return self.__buildings[blockNumber].setFrontDoorPoint(point)
def getDNABlockLists(self):
blocks = []
hqBlocks = []
gagshopBlocks = []
petshopBlocks = []
kartshopBlocks = []
animBldgBlocks = []
for i in xrange(self.dnaStore.getNumBlockNumbers()):
blockNumber = self.dnaStore.getBlockNumberAt(i)
buildingType = self.dnaStore.getBlockBuildingType(blockNumber)
if buildingType == 'hq':
hqBlocks.append(blockNumber)
elif buildingType == 'gagshop':
gagshopBlocks.append(blockNumber)
elif buildingType == 'petshop':
petshopBlocks.append(blockNumber)
elif buildingType == 'kartshop':
kartshopBlocks.append(blockNumber)
elif buildingType == 'animbldg':
animBldgBlocks.append(blockNumber)
else:
blocks.append(blockNumber)
return (blocks, hqBlocks, gagshopBlocks, petshopBlocks, kartshopBlocks,
animBldgBlocks)
def findAllLandmarkBuildings(self):
backups = simbase.backups.load('blockinfo', (self.air.districtId, self.branchId), default={})
(blocks, hqBloc | ks, gagshopBlocks, petshopBlocks, kartshopBlocks,
| animBldgBlocks) = self.getDNABlockLists()
for blockNumber in blocks:
self.newBuilding(blockNumber, backup=backups.get(blockNumber, None))
for blockNumber in animBldgBlocks:
self.newAnimBuilding(blockNumber, backup=backups.get(blockNumber, None))
for blockNumber in hqBlocks:
self.newHQBuilding(blockNumber)
for blockNumber in gagshopBlocks:
self.newGagshopBuilding(blockNumber)
for block in petshopBlocks:
self.newPetshopBuilding(block)
for block in kartshopBlocks:
self.newKartShopBuilding(block)
def newBuilding(self, blockNumber, backup=None):
building = DistributedBuildingAI.DistributedBuildingAI(
self.air, blockNumber, self.branchId, self.trophyMgr)
building.generateWithRequired(self.branchId)
if backup is not None:
state = backup.get('state', 'toon')
if ((state == 'suit') and simbase.air.wantCogbuildings) or (
(state == 'cogdo') and simbase.air.wantCogdominiums):
building.track = backup.get('track', 'c')
building.difficulty = backup.get('difficulty', 1)
building.numFloors = backup.get('numFloors', 1)
building.updateSavedBy(backup.get('savedBy'))
building.becameSuitTime = backup.get('becameSuitTime', time.mktime(time.gmtime()))
if (state == 'suit') and simbase.air.wantCogbuildings:
building.setState('suit')
elif (state == 'cogdo') and simbase.air.wantCogdominiums:
building.setState('cogdo')
else:
building.setState('toon')
else:
building.setState('toon')
else:
building.setState('toon')
self.__buildings[blockNumber] = building
return building
    def newAnimBuilding(self, blockNumber, backup=None):
        """Animated buildings are managed exactly like regular street buildings."""
        return self.newBuilding(blockNumber, backup=backup)
def newHQBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = HQBuildingAI.HQBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newGagshopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = GagshopBuildingAI.GagshopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newPetshopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = PetshopBuildingAI.PetshopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newKartShopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
int |
AlessandroSpallina/JASM | testclient/sockets.py | Python | gpl-3.0 | 578 | 0.025952 | import socket
class Socket:
    """Small TCP client that frames each message with a Data-Size header.

    The garbled tokens "p | ort" and "sel | f" in the original made this
    class a syntax error; this restores the intended identifiers.
    """
    def __init__(self, ipaddr="127.0.0.1", port=9734):
        # Connects immediately; callers get a ready-to-use socket.
        self.ipaddr = ipaddr
        self.port = port
        self.sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sck.connect((ipaddr, port))
    def send(self, data):
        """Send `data`, prefixed with its length so the server can frame it."""
        defdata = "Data-Size: {}\n\n{}".format(len(data), data)
        # encode() already yields bytes (py3) / str (py2); the original's
        # extra bytes(...) wrapper was a no-op and is dropped.
        return self.sck.sendall(defdata.encode('utf-8'))
    def recv(self, bufsize=1024):
        """Receive up to bufsize bytes, decoded as UTF-8 (bad bytes replaced)."""
        return self.sck.recv(bufsize).decode("utf-8", "replace")
    def close(self):
        """Close the underlying socket."""
        self.sck.close()

if __name__ == '__main__':
    exit(1)  # this module is import-only; running it directly is an error
|
dipen30/boxapi | box/error.py | Python | mit | 982 | 0.026477 | # box
# Copyright 2013-2014 Dipen Patel
# See LICENSE for details.
# HTTP status codes returned by the Box API, mapped to short symbolic names.
# The garbled entries `304 : " | not_modified"` and `40 | 3` are restored
# to their intended values here.
STATUSCODES = {
    200 : "success",
    201 : "created",
    202 : "accepted",
    204 : "no_content",
    302 : "redirect",
    304 : "not_modified",
    400 : "bad_request",
    401 : "unauthorized",
    403 : "forbidden",
    404 : "not_found",
    405 : "method_not_allowed",
    409 : "conflict",
    412 : "precondition_failed",
    429 : "too_many_requests",
    500 : "internal_server_error",
    507 : "insufficient_storage"
    }

# Subset of STATUSCODES that the client treats as errors.
ERRORCODES = (204,400,401,403,404,405,409,412,429,500,507)
class BoxError(Exception):
    """Exception raised for Box API failures.

    Carries a human-readable reason and, when available, the HTTP
    response object that triggered the error.
    """
    def __init__(self, reason, response=None):
        # NOTE(review): `unicode` exists only on Python 2; this module is
        # Python 2 code and would need `str` here under Python 3.
        self.reason = unicode(reason)
        self.response = response
        Exception.__init__(self, reason)
    def __str__(self):
        return self.reason
|
annahs/atmos_research | AL_size_distrs.py | Python | mit | 14,993 | 0.048689 | import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import mysql.connector
import math
import matplotlib.pyplot as plt
import matplotlib.colors
from matplotlib import dates
import calendar
from scipy.optimize import curve_fit
# Analysis window and aggregation interval.
start = datetime(2011,3,5,0)
end = datetime(2014,1,1,0)
interval = 48 #hours
# Run-control flags.
compare_to_PAPI = False
show_plot = False
write_to_file = True
# Fit range and histogram bin width (same units as the VEDs, i.e. nm --
# see calcuate_VED below).
fit_bin_min = 10
fit_bin_max = 1000
bin_incr = 5
#database connection
# NOTE(review): database credentials are hard-coded in source; move them
# to a config file or environment variables before sharing this script.
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
def myRound(x, base):
	"""Round x to the nearest integer multiple of base."""
	multiples = round(float(x) / base)
	return int(base * multiples)
def lognorm(x_vals, A, w, xc):
	"""Lognormal curve used as the fit model: amplitude A, log-width w,
	mode location xc, evaluated elementwise at x_vals."""
	prefactor = A / (np.sqrt(2 * math.pi) * w * x_vals)
	return prefactor * np.exp(-(np.log(x_vals / xc)) ** 2 / (2 * w ** 2))
def make_bin_dict():
	"""Build the empty diameter histogram: {lower_edge: [lower, upper, count, mass]}.

	Bin edges run from min_BC_VED to max_BC_VED (module globals,
	defined elsewhere in this script) in steps of bin_incr.  The
	garbled token "bin | ]" in the original made this a syntax error.
	"""
	return {edge: [edge, (edge + bin_incr), 0, 0]
		for edge in range(min_BC_VED, max_BC_VED, bin_incr)}
def calcuate_VED(bbhg_incand_pk_amp, bblg_incand_pk_amp, instr_id):
	"""Convert SP2 incandescence peak amplitudes to a volume-equivalent
	diameter (VED, nm), using the per-instrument mass calibrations.

	For SP2 #58 the high-gain channel covers masses below 12.8 (the
	low-gain channel above); #44 and #17 use only the high-gain channel.
	Returns np.nan when the mass falls outside the accepted range
	(min_rBC_mass/max_rBC_mass are module globals defined elsewhere).
	"""
	def mass_to_ved(mass_fg):
		# mass (fg) -> diameter (nm), assuming a density of 1.8 g/cc
		return (((mass_fg / (10 ** 15 * 1.8)) * 6 / math.pi) ** (1 / 3.0)) * 10 ** 7
	VED = np.nan
	if instr_id == '58':
		hg_mass = (0.29069 + 1.49267E-4 * bbhg_incand_pk_amp
			+ 5.02184E-10 * bbhg_incand_pk_amp * bbhg_incand_pk_amp) / 0.7  # AD correction 0.7 +- 0.05
		lg_mass = (-0.15884 + 0.00176 * bblg_incand_pk_amp
			+ 3.19118E-8 * bblg_incand_pk_amp * bblg_incand_pk_amp) / 0.7
		if min_rBC_mass <= hg_mass < 12.8:
			VED = mass_to_ved(hg_mass)
		elif 12.8 <= lg_mass < max_rBC_mass:
			VED = mass_to_ved(lg_mass)
	if instr_id == '44':
		hg_mass = (0.18821 + 1.36864E-4 * bbhg_incand_pk_amp
			+ 5.82331E-10 * bbhg_incand_pk_amp * bbhg_incand_pk_amp) / 0.7
		if min_rBC_mass <= hg_mass < max_rBC_mass:
			VED = mass_to_ved(hg_mass)
	if instr_id == '17':
		hg_mass = (-0.017584 + 0.00647 * bbhg_incand_pk_amp) / 0.7
		if min_rBC_mass <= hg_mass < max_rBC_mass:
			VED = mass_to_ved(hg_mass)
	return VED
def assemble_interval_data(retrieved_records):
	"""Collect particle VEDs and the total sampled volume for one interval.

	Each record is (start, end, hg_amp, lg_amp, sample_flow, instr_id);
	for SP2 #17 the fourth field is the high-gain amplitude again, since
	that instrument has no low-gain channel.  Returns a dict with keys
	'VED list', 'sampled volume' (sccm) and 'instr'.
	"""
	total_sample_vol = 0
	ved_list = []
	for record in retrieved_records:
		ind_start_time, ind_end_time, hg_amp, lg_amp, sample_flow, instr_id = record[0:6]
		if sample_flow == None:
			continue
		if (ind_end_time - ind_start_time) > 500:
			# ignore particles spanning the 10-min gaps in SP2 #17 sampling
			continue
		# /60 because the flow is in sccm and the interval is in seconds
		total_sample_vol = total_sample_vol + (sample_flow * (ind_end_time - ind_start_time) / 60)
		ved_list.append(calcuate_VED(hg_amp, lg_amp, instr_id))
	return {'VED list': ved_list,
		'sampled volume': total_sample_vol,
		'instr': instr_id}
def make_binned_list(interval_data_dict):
	"""Bin the interval's VEDs and compute per-bin number and mass
	concentrations, both raw and dlogD-normalized.

	Returns rows of [bin_ll, bin_ul, bin_mid, number, number_conc,
	norm_number_conc, mass, mass_conc, norm_mass_conc, total_vol_sccm],
	sorted by lower bin edge.
	"""
	raw_dia_list = interval_data_dict['VED list']
	total_vol_sccm = interval_data_dict['sampled volume']
	if total_vol_sccm == 0:
		# avoid ZeroDivisionError below; concentrations become NaN instead
		total_vol_sccm = np.nan
	instr_id = interval_data_dict['instr']
	bin_dict = make_bin_dict()
	for dia in raw_dia_list:
		for point in bin_dict:
			LL_bin = bin_dict[point][0]
			UL_bin = bin_dict[point][1]
			if (LL_bin <= dia < UL_bin):
				# particle mass (fg) from VED (nm), density 1.8 g/cc --
				# inverse of the conversion in calcuate_VED
				mass = ((dia/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
				# NOTE(review): the two `if`s below are not `elif`s, so for
				# instr '44' the trailing `else` branch ALSO runs (mass added
				# 8x in the loop plus once more, particle counted twice).
				# Also '58' increments the count inside its loop (10x per
				# particle) while '44' increments once.  Both look like
				# bugs (`elif` intended?) -- confirm before changing.
				if instr_id == '44':
					for count in range(0,8):
						bin_dict[point][3] += mass
					bin_dict[point][2] += 1
				if instr_id == '58':
					for count in range(0,10):
						bin_dict[point][3] += mass
						bin_dict[point][2] += 1
				else:
					bin_dict[point][3] += mass
					bin_dict[point][2] += 1
	dia_list = []
	for point in bin_dict:
		bin_ll = bin_dict[point][0]
		bin_ul = bin_dict[point][1]
		bin_mid = bin_ll + (bin_ul-bin_ll)/2
		number = bin_dict[point][2]
		number_conc = number/total_vol_sccm #in #/cm3
		norm_number_conc = number_conc/(math.log(bin_ul)-math.log(bin_ll))
		mass = bin_dict[point][3]
		mass_conc = mass/total_vol_sccm #in #/cm3
		norm_mass_conc = mass_conc/(math.log(bin_ul)-math.log(bin_ll))
		dia_list.append([bin_ll,bin_ul,bin_mid,number,number_conc,norm_number_conc,mass,mass_conc,norm_mass_conc,total_vol_sccm])
	dia_list.sort()
	return dia_list
def fit_distr(data_to_fit_bins,data_to_fit_concs):
core_bin_midpoints = []
for bin in data_to_fit_bins:
if np.isnan(bin) == False:
core_bin_midpoints.append(bin)
core_mass_conc_norm = []
for conc in data_to_fit_concs:
if np.isnan(conc) == False:
core_mass_conc_norm.append(conc)
#fit
try:
popt, pcov = curve_fit(lognorm, np.array(core_bin_midpoints), np.array(core_mass_conc_norm), p0=(2000,0.6,150))
except Exception,e:
popt = [np.nan,np.nan,np.nan]
pcov = [np.nan,np.nan,np.nan]
print str(e)
print 'fit failure'
perr = np.sqrt(np.diag(pcov))
sigma = math.exp(popt[1])
return [popt, perr,sigma]
def calc_std_err_of_estimate(data_to_fit_bins,data_to_fit_mass_concs,mass_fit_coefficients):
comparison_list = []
i=0
for bin_mid in data_to_fit_bins:
norm_mass_conc = data_to_fit_mass_concs[i]
if norm_mass_conc > 0:
fit_val = lognorm(bin_mid, mass_fit_coefficients[0], mass_fit_coefficients[1], mass_fit_coefficients[2])
diff_to_fit = ((norm_mass_conc-fit_val)/norm_mass_conc)**2 #changed to normalize by value here becasue all are on diff scales
comparison_list.append(diff_to_fit)
i+=1
std_err_of_estimate = (np.sum(comparison_list)/len(comparison_list))**0.5
print 'std_err_of_estimate: ',std_err_of_estimate
return std_err_of_estimate
def get_PAPI_data():
PAPI_list = []
data_dir = 'F:/Alert/'+ str(start_time.year) +'/Reduced/' + datetime.strftime(datetime(start_time.year, start_time.month, start_time.day),'%Y%m%d') + '/' #Alert data is in UTC - see email from Dan Veber
os.chdir(data_dir)
for file in os.listdir('.'):
if file.endswith('SizeDist.dat'):
print file
with open(file, 'r') as f:
temp = f.read().splitlines()
first_line = True
for line in temp:
if first_line == True:
first_line = False
continue
newline = line.split()
bin_mid = float(newline[0])*1000
number = float(newline[1])
mass = float(newline[2])
print number
PAPI_list.append([bin_mid,mass,number])
return PAPI_list
def write_files(core_bin_mids,core_norm_mass_concs,core_norm_numb_concs,fit_bin_mids,mass_fit_points,numb_fit_points,ratio_uncertainty,mass_fit_sigma):
distr_list = []
i=0
for fit_bin in fit_bin_mids:
if fit_bin in core_bin_mids:
meas_index = core_bin_mids.index(fit_bin)
meas_mass_val = core_norm_mass_concs[meas_index]
meas_numb_val = core_norm_numb_concs[meas_index]
#print fit_bin,meas_index,meas_mass_val
else:
meas_mass_val = np.nan
meas_numb_val = np.nan
if meas_mass_val == 0:
meas_mass_val = np.nan
if meas_numb_val == 0:
meas_numb_val = np.nan
distr_list.append([fit_bin+0.5,meas_mass_val,mass_fit_points[i],meas_numb_val,numb_fit_points[i]])
i+=1
#mass ratio
meas_area = np.nansum(core_norm_mass_concs)
fit_area = np.nansum(mass_fit_points)
ratio = meas_area/fit_area
file = open('C:/Users/Sarah Hanna/Documents/Data/Alert Data/distributions/mass and number distributions 48hr/rBC distributions for '+str(datetime.date(start_time))+'.txt', 'w')
file.write('fraction of mass distribution measured (area under measured curve/area under fit curve)= ' + str(ratio) + ' +- ' + str(round(ratio_uncertainty,3)) + '\n')
file.write('mass fit sigma: ' + str(mass_fit_sigma) + '\n')
file.write('bin_midpoints(nm)\tmeas_mass_conc(dM/dlogD-ng/m3)\tfit_mass_conc(dM/dlogD |
ruchee/vimrc | vimfiles/bundle/vim-python/submodules/snowball_py/snowballstemmer/dutch_stemmer.py | Python | mit | 23184 | 0.002459 | # This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class DutchStemmer(BaseStemmer):
'''
self class was automatically generated by a Snowball to Python interpreter
It implements the stemming algorithm defined by a snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"", -1, 6),
Among(u"\u00E1", 0, 1),
Among(u"\u00E4", 0, 1),
Among(u"\u00E9", 0, 2),
Among(u"\u00EB", 0, 2),
Among(u"\u00ED", 0, 3),
Among(u"\u00EF", 0, 3),
Among(u"\u00F3", 0, 4),
Among(u"\u00F6", 0, 4),
Among(u"\u00FA", 0, 5),
Among(u"\u00FC", 0, 5)
]
a_1 = [
Among(u"", -1, 3),
Among(u"I", 0, 2),
Among(u"Y", 0, 1)
]
a_2 = [
Among(u"dd", -1, -1),
Among(u"kk", -1, -1),
Among(u"tt", -1, -1)
]
a_3 = [
Among(u"ene", -1, 2),
Among(u"se", -1, 3),
Among(u"en", -1, 2),
Among(u"heden", 2, 1),
Among(u"s", -1, 3)
]
a_4 = [
Among(u"end", -1, 1),
Among(u"ig", -1, 2),
Among(u"ing", -1, 1),
Among(u"lijk", -1, 3),
Among(u"baar", -1, 4),
Among(u"bar", -1, 5)
]
a_5 = [
Among(u"aa", -1, -1),
Among(u"ee", -1, -1),
Among(u"oo", -1, -1),
Among(u"uu", -1, -1)
]
g_v = [17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128]
g_v_I = [1, 0, 0, 17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128]
g_v_j = [17, 67, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128]
I_p2 = 0
I_p1 = 0
B_e_found = False
def copy_from(self, other):
self.I_p2 = other.I_p2
self.I_p1 = other.I_p1
self.B_e_found = other.B_e_found
super.copy_from(other)
    def r_prelude(self):
        """Generated Snowball 'prelude' pass.

        Folds accented vowels to their base letters (a_0 table) and
        rewrites 'y' (and 'i' between vowels) to the markers 'Y'/'I';
        presumably a later pass maps the markers back (see a_1).  The
        labN exceptions implement Snowball's structured gotos.
        """
        # (, line 41
        # test, line 42
        v_1 = self.cursor
        # repeat, line 42
        try:
            while True:
                try:
                    v_2 = self.cursor
                    try:
                        # (, line 42
                        # [, line 43
                        self.bra = self.cursor
                        # substring, line 43
                        among_var = self.find_among(DutchStemmer.a_0, 11)
                        if among_var == 0:
                            raise lab2()
                        # ], line 43
                        self.ket = self.cursor
                        if among_var == 0:
                            raise lab2()
                        elif among_var == 1:
                            # (, line 45
                            # <-, line 45
                            if not self.slice_from(u"a"):
                                return False
                        elif among_var == 2:
                            # (, line 47
                            # <-, line 47
                            if not self.slice_from(u"e"):
                                return False
                        elif among_var == 3:
                            # (, line 49
                            # <-, line 49
                            if not self.slice_from(u"i"):
                                return False
                        elif among_var == 4:
                            # (, line 51
                            # <-, line 51
                            if not self.slice_from(u"o"):
                                return False
                        elif among_var == 5:
                            # (, line 53
                            # <-, line 53
                            if not self.slice_from(u"u"):
                                return False
                        elif among_var == 6:
                            # (, line 54
                            # next, line 54
                            if self.cursor >= self.limit:
                                raise lab2()
                            self.cursor += 1
                        raise lab1()
                    except lab2: pass
                    self.cursor = v_2
                    raise lab0()
                except lab1: pass
        except lab0: pass
        self.cursor = v_1
        # try, line 57
        v_3 = self.cursor
        try:
            # (, line 57
            # [, line 57
            self.bra = self.cursor
            # literal, line 57
            if not self.eq_s(1, u"y"):
                self.cursor = v_3
                raise lab3()
            # ], line 57
            self.ket = self.cursor
            # <-, line 57
            if not self.slice_from(u"Y"):
                return False
        except lab3: pass
        # repeat, line 58
        try:
            while True:
                try:
                    v_4 = self.cursor
                    try:
                        # goto, line 58
                        try:
                            while True:
                                v_5 = self.cursor
                                try:
                                    # (, line 58
                                    if not self.in_grouping(DutchStemmer.g_v, 97, 232):
                                        raise lab8()
                                    # [, line 59
                                    self.bra = self.cursor
                                    # or, line 59
                                    try:
                                        v_6 = self.cursor
                                        try:
                                            # (, line 59
                                            # literal, line 59
                                            if not self.eq_s(1, u"i"):
                                                raise lab10()
                                            # ], line 59
                                            self.ket = self.cursor
                                            if not self.in_grouping(DutchStemmer.g_v, 97, 232):
                                                raise lab10()
                                            # <-, line 59
                                            if not self.slice_from(u"I"):
                                                return False
                                            raise lab9()
                                        except lab10: pass
                                        self.cursor = v_6
                                        # (, line 60
                                        # literal, line 60
                                        if not self.eq_s(1, u"y"):
                                            raise lab8()
                                        # ], line 60
                                        self.ket = self.cursor
                                        # <-, line 60
                                        if not self.slice_from(u"Y"):
                                            return False
                                    except lab9: pass
                                    self.cursor = v_5
                                    raise lab7()
                                except lab8: pass
                                self.cursor = v_5
                                if self.cursor >= self.limit:
                                    raise lab6()
                                self.cursor += 1
                        except lab7: pass
                        raise lab5()
                    except lab6: pass
                    self.cursor = v_4
                    raise lab4()
                except lab5: pass
        except lab4: pass
        return True
def r_mark_regions(self):
# (, line 64
self.I_p1 = self.limit;
self.I_p2 = self.limit;
# gopast, line 69
try:
while True:
try:
if not self.in_grouping(DutchStemmer.g_v, 97, 232):
raise lab1()
raise lab0()
except lab1: pass
if self.cursor >= self.limit:
return False
self.cursor |
endlessm/chromium-browser | third_party/catapult/dashboard/dashboard/services/crrev_service_test.py | Python | bsd-3-clause | 1,606 | 0.000623 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import

import json
import unittest

import mock

# The garbled "dashboard.servic | es" in the original made this line a
# syntax error; restored to the intended package path.
from dashboard.services import crrev_service
@mock.patch('dashboard.services.request.Request')
class CrrevServiceTest(unittest.TestCase):
  """Unit test for crrev_service.GetNumbering with the HTTP layer mocked."""

  def testGetNumbering(self, mock_request):
    request_params = {
        'number': '498032',
        'numbering_identifier': 'refs/heads/master',
        'numbering_type': 'COMMIT_POSITION',
        'project': 'chromium',
        'repo': 'chromium/src'
    }
    expected_payload = {
        'git_sha': '4c9925b198332f5fbb82b3edb672ed55071f87dd',
        'repo': 'chromium/src',
        'numbering_type': 'COMMIT_POSITION',
        'number': '498032',
        'project': 'chromium',
        'numbering_identifier': 'refs/heads/master',
        'redirect_url': 'https://chromium.googlesource.com/chromium/src/+/foo',
        'kind': 'crrev#numberingItem',
        'etag': '"z28iYHtWcY14RRFEUgin0OFGLHY/au8p5YtferYwojQRpsPavK6G5-A"'
    }
    # The service should transparently JSON-decode the raw response body.
    mock_request.return_value = json.dumps(expected_payload)
    actual = crrev_service.GetNumbering(**request_params)
    self.assertEqual(actual, expected_payload)
    mock_request.assert_called_once_with(
        'https://cr-rev.appspot.com/_ah/api/crrev/v1/get_numbering', 'GET',
        project='chromium', repo='chromium/src', number='498032',
        numbering_type='COMMIT_POSITION',
        numbering_identifier='refs/heads/master')
|
Bihaqo/exp-machines | src/TTRegression.py | Python | mit | 13,433 | 0.001266 | from sklearn.linear_model.base import BaseEstimator, LinearClassifierMixin
import sklearn
import numpy as np
from copy import deepcopy
from utils import roc_auc_score_reversed
import tt
import logging
class TTRegression(BaseEstimator, LinearClassifierMixin):
"""This class alows to optimize functions of the following structure:
sum_i f(<w, g(x_i)> + b, y_i) + lambda <w, w> / 2
where the sum is over the objects in the dataset, w is a tensor in the TT-format.
Parameters
----------
tt_model : {'all-subsets'}
loss_name : {'logistic', 'hinge', 'mse'}
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
max_iter : int, default: 100
solver : {'riemannian-sgd', 'sgd'}
Algorithm to use in the optimization problem.
batch_size : Positive integer OR -1
-1 is for the full gradient, that is using the whole training set in
each batch.
reg : float, default: 0
L2 regularization coefficient.
WARNING: reg parameter means different things for different solvers.
Riemannian-sgd assumes L2 regularization in terms of the tensor w:
reg * <w, w> / 2
while sgd solver assumes regularization in terms of the cores elements:
reg * <w.core, w.core> / 2
verbose : int
Set verbose to any positive number for verbosity.
Attributes
----------
coef_ : TT-tensor
intercept_ : real
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
logger : instance of the class Logging
Contains all the logged details (e.g. loss on each iteration).
"""
    def __init__(self, tt_model, loss_name, rank,
                 solver='riemannian-sgd', batch_size=-1, fit_intercept=True,
                 reg=0., exp_reg=1.0, dropout=None, max_iter=100, verbose=0,
                 persuit_init=False, coef0=None, intercept0=None):
        """Store the constructor parameters verbatim (see the class docstring
        for their meanings); no validation happens here -- parse_params()
        checks them on each fit() call."""
        # Save all the params as class attributes. It's required by the
        # BaseEstimator class.
        # NOTE(review): 'persuit_init' looks like a typo for 'pursuit_init',
        # but renaming it would break the public estimator interface.
        self.tt_model = tt_model
        self.loss_name = loss_name
        self.rank = rank
        self.solver = solver
        self.batch_size = batch_size
        self.fit_intercept = fit_intercept
        self.reg = reg
        self.exp_reg = exp_reg
        self.dropout = dropout
        self.max_iter = max_iter
        self.verbose = verbose
        self.persuit_init = persuit_init
        self.coef0 = coef0
        self.intercept0 = intercept0
    def parse_params(self):
        """Checks the parameters and sets class attributes according to them.

        This has to be done on each call to fit(), since parameters can change
        after __init__ via the BaseEstimator.set_params method.  Resolves the
        tt_model and loss_name strings into the concrete callback functions
        used by the optimizer; raises ValueError for unsupported values.
        """
        if self.reg < 0:
            raise ValueError("Regularization should be positive.")
        if self.exp_reg < 1.:
            raise ValueError("Exponential regularization should be greater than 1.0")
        if np.abs(self.reg) < 1e-12 and np.abs(self.exp_reg - 1.) > 1e-12:
            print('WARNING: exp_reg has no effect without reg. '
                  'The regularization works like this:\n'
                  'reg * (W_00^2 + exp_reg * W_01^2 + exp_reg * W_10^2 + '
                  'exp_reg^2 * W_11^2)')
        self.watched_metrics = {}
        # Backend modules are imported lazily inside the branches so that
        # unused models/losses never get imported.
        if self.tt_model == 'all-subsets':
            import models.all_subsets as all_subsets
            self.tt_dot = all_subsets.vectorized_tt_dot
            self.project = all_subsets.project_all_subsets
            self.tensorize_linear_init = all_subsets.tensorize_linear_init
            self.gradient_wrt_cores = all_subsets.gradient_wrt_cores
            self.object_tensor = all_subsets.subset_tensor
        else:
            raise ValueError("Only all-subsets model is supported.")
        if self.loss_name == 'logistic':
            import objectives.logistic as logistic
            self.loss = logistic.binary_logistic_loss
            self.loss_grad = logistic.binary_logistic_loss_grad
            self.preprocess = logistic.preprocess
            self.linear_init = logistic.linear_init
            self.watched_metrics = {
                "logistic": self.loss,
                "auc": roc_auc_score_reversed
            }
        elif self.loss_name == 'mse':
            # TODO: MSE loss fluctuates instead of steadily improving. Debug!
            # The possible reason is the lack of regularization (norm of w
            # goes to 1e10 and machine errors become too large).
            import objectives.mse as mse
            self.loss = mse.mse_loss
            self.loss_grad = mse.mse_loss_grad
            self.preprocess = mse.preprocess
            self.linear_init = mse.linear_init
            self.watched_metrics = {
                "mse": self.loss
            }
        elif self.loss_name == 'hinge':
            from objectives import hinge
            self.loss = hinge.hinge_loss
            self.loss_grad = hinge.hinge_loss_grad
            self.preprocess = hinge.preprocess
            self.linear_init = hinge.linear_init
            self.watched_metrics = {
                "hinge": self.loss,
                "auc": roc_auc_score_reversed
            }
        else:
            raise ValueError("Only logistic, mse and hinge losses are supported.")
def fit(self, X_, y_):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training object-feature matrix, where n_samples in the number
of samples and n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
self.fit_log_val(X_, y_)
def fit_log_val(self, X_, y_, val_X_=None, val_y_=None):
"""Fit the model according to the given training data. Log validation loss on each epoch.
Parameters
----------
X_ : {array-like}, shape (n_samples, n_features)
Training object-feature matrix, where n_samples in the number
of samples and n_features is the number of features.
y_ : array-like, shape (n_samples,)
Target vector relative to X_.
val_X_ : {array-like}, shape (n_val_samples, n_features)
Validation object-feature matrix.
val_y_ : array-like, shape (n_val_samples,)
Target vector relative to val_X_.
Returns
-------
self : object
Returns self.
"""
self.parse_params()
self.logger = logging.Logging(self.verbose, self.watched_metrics, log_w_norm=True)
if np.abs(self.reg) > 1e-10 and self.logger.disp():
print('WARNING: reg parameter means different things for different solvers.\n'
'Riemannian-sgd assumes L2 regularization in terms of the tensor w:\n'
'\treg * <w | , w> / 2\n'
'while sgd solver assumes regularization in terms of the cores elements:\n'
'\treg * <w.core, w.core> / 2\n')
if self.persuit_init and self.coef0 is not None:
if self.logger.disp():
print('WARNING: persuit_init parameter is not compatible with '
'explicitly pro | viding initial values.')
# TODO: deal with sparse data.
# Copy the dataset, since preprocessing changes user's data, which is messy.
X = deepcopy(X_)
y = deepcopy(y_)
X, y, self.info = self.preprocess(X, y)
if val_X_ is not None and val_y_ is not None:
val_X = deepcopy(val_X_)
val_y = deepcopy(val_y_)
val_X, val_y, self.info = self.preprocess(val_X, val_y, self.info)
else:
val_X, val_y = None, None
if self.coef0 is None:
self.coef_, self.intercept_ = self.linear_init(X, y)
# Convert coefficients of linear model into the TT-fo |
ChenglongChen/Kaggle_HomeDepot | Code/Chenglong/feature_base.py | Python | mit | 8,989 | 0.007342 | # -*- coding: utf-8 -*-
"""
@author: Chenglong Chen <c.chenglong@gmail.com>
@brief: base class for feature generation
"""
import os
import sys
import numpy as np
import config
from config import TRAIN_SIZE
from utils import np_utils, pkl_utils
# Since we have many features that measure the correlation/similarity/distance
# between search_term and product_title/product_description, we implement this base class.
class BaseEstimator:
    """Base class for features measuring correlation/similarity/distance
    between an observation corpus and a target corpus.

    Subclasses must implement ``transform_one(obs, target, id)``; it may
    return either a scalar or a (possibly nested) list, in which case the
    configured aggregators reduce it to fixed-size columns.
    """
    def __init__(self, obs_corpus, target_corpus, aggregation_mode, id_list=None, aggregation_mode_prev=""):
        self.obs_corpus = obs_corpus
        self.N = len(obs_corpus)
        # for standalone feature, we use the same interface, so better take care of it
        self.target_corpus = range(self.N) if target_corpus is None else target_corpus
        # id_list is used for group based relevance/distance features
        self.id_list = range(self.N) if id_list is None else id_list
        # aggregation for list features, e.g., intersect positions
        self.aggregation_mode, self.aggregator = self._check_aggregation_mode(aggregation_mode)
        self.aggregation_mode_prev, self.aggregator_prev = self._check_aggregation_mode(aggregation_mode_prev)
        self.double_aggregation = False
        if self.aggregator_prev != [None]:
            # the output of transform_one is a list of list, i.e., [[...], [...], [...]]
            # self.aggregator_prev is used to aggregate the inner list
            # This is used for the following features:
            # 1. EditDistance_Ngram
            # 2. CompressionDistance_Ngram
            # 3. Word2Vec_CosineSim
            # 4. WordNet_Path_Similarity, WordNet_Lch_Similarity, WordNet_Wup_Similarity
            # which are very time consuming to compute the inner list
            self.double_aggregation = True

    def _check_aggregation_mode(self, aggregation_mode):
        # Normalize a string or list of strings into a lowercase list and
        # resolve each mode name to the matching numpy reduction ("" -> None).
        valid_aggregation_modes = ["", "size", "mean", "std", "max", "min", "median"]
        if isinstance(aggregation_mode, str):
            assert aggregation_mode.lower() in valid_aggregation_modes, "Wrong aggregation_mode: %s"%aggregation_mode
            aggregation_mode = [aggregation_mode.lower()]
        elif isinstance(aggregation_mode, list):
            for m in aggregation_mode:
                assert m.lower() in valid_aggregation_modes, "Wrong aggregation_mode: %s"%m
            aggregation_mode = [m.lower() for m in aggregation_mode]
        aggregator = [None if m == "" else getattr(np, m) for m in aggregation_mode]
        return aggregation_mode, aggregator

    def transform(self):
        """Apply transform_one to every row and aggregate list outputs.

        Returns an (N,) or (N, k) float array; failed aggregations are
        filled with config.MISSING_VALUE_NUMERIC rather than raising.
        """
        # original score
        score = list(map(self.transform_one, self.obs_corpus, self.target_corpus, self.id_list))
        # aggregation
        if isinstance(score[0], list):
            if self.double_aggregation:
                # double aggregation: inner lists reduced by aggregator_prev,
                # then the resulting list reduced by aggregator; one output
                # column per (aggregator_prev, aggregator) pair.
                res = np.zeros((self.N, len(self.aggregator_prev) * len(self.aggregator)), dtype=float)
                for m,aggregator_prev in enumerate(self.aggregator_prev):
                    for n,aggregator in enumerate(self.aggregator):
                        idx = m * len(self.aggregator) + n
                        for i in range(self.N):
                            # process in a safer way
                            try:
                                tmp = []
                                for l in score[i]:
                                    try:
                                        s = aggregator_prev(l)
                                    except:
                                        s = config.MISSING_VALUE_NUMERIC
                                    tmp.append(s)
                            except:
                                tmp = [ config.MISSING_VALUE_NUMERIC ]
                            try:
                                s = aggregator(tmp)
                            except:
                                s = config.MISSING_VALUE_NUMERIC
                            res[i,idx] = s
            else:
                # single aggregation: one output column per aggregator
                res = np.zeros((self.N, len(self.aggregator)), dtype=float)
                for m,aggregator in enumerate(self.aggregator):
                    for i in range(self.N):
                        # process in a safer way
                        try:
                            s = aggregator(score[i])
                        except:
                            s = config.MISSING_VALUE_NUMERIC
                        res[i,m] = s
        else:
            # scalar scores: no aggregation needed
            res = np.asarray(score, dtype=float)
        return res
# Wrapper for generating standalone feature, e.g.,
# count of words in search_term
class StandaloneFeatureWrapper:
    """Run a feature generator over single observation fields, pickle each
    resulting feature column to feat_dir, and log its correlation with the
    training-set relevance target."""
    def __init__(self, generator, dfAll, obs_fields, param_list, feat_dir, logger, force_corr=False):
        self.generator = generator       # BaseEstimator subclass to instantiate per field
        self.dfAll = dfAll               # full train+test dataframe
        self.obs_fields = obs_fields     # column names to generate features for
        self.param_list = param_list     # extra positional args for the generator
        self.feat_dir = feat_dir         # output directory for pickled features
        self.logger = logger
        self.force_corr = force_corr     # log per-column corr even for multi-dim features

    def go(self):
        """Generate, save and correlation-log features for every obs field."""
        # Only the first TRAIN_SIZE rows have relevance labels.
        y_train = self.dfAll["relevance"].values[:TRAIN_SIZE]
        for obs_field in self.obs_fields:
            if obs_field not in self.dfAll.columns:
                self.logger.info("Skip %s"%obs_field)
                continue
            obs_corpus = self.dfAll[obs_field].values
            # target_corpus is None for standalone features (same interface
            # as pairwise features -- see BaseEstimator).
            ext = self.generator(obs_corpus, None, *self.param_list)
            x = ext.transform()
            if isinstance(ext.__name__(), list):
                # One named 1D feature per column of x.
                for i,feat_name in enumerate(ext.__name__()):
                    dim = 1
                    fname = "%s_%s_%dD"%(feat_name, obs_field, dim)
                    pkl_utils._save(os.path.join(self.feat_dir, fname+config.FEAT_FILE_SUFFIX), x[:,i])
                    corr = np_utils._corr(x[:TRAIN_SIZE,i], y_train)
                    self.logger.info("%s (%dD): corr = %.6f"%(fname, dim, corr))
            else:
                # Single (possibly multi-dimensional) feature saved as one file.
                dim = np_utils._dim(x)
                fname = "%s_%s_%dD"%(ext.__name__(), obs_field, dim)
                pkl_utils._save(os.path.join(self.feat_dir, fname+config.FEAT_FILE_SUFFIX), x)
                if dim == 1:
                    corr = np_utils._corr(x[:TRAIN_SIZE], y_train)
                    self.logger.info("%s (%dD): corr = %.6f"%(fname, dim, corr))
                elif self.force_corr:
                    # Correlation per column is optional for multi-dim features.
                    for j in range(dim):
                        corr = np_utils._corr(x[:TRAIN_SIZE,j], y_train)
                        self.logger.info("%s (%d/%dD): corr = %.6f"%(fname, j+1, dim, corr))
# Wrapper for generating pairwise feature, e.g.,
# intersect count of words between search_term and product_title
class PairwiseFeatureWrapper:
def __init__(self, generator, dfAll, obs_fields, target_fields, param_list, feat_dir, logger, force_corr=False):
self.generator = generator
self.dfAll = dfAll
self.obs_fields = obs_fields
self.target_fields = target_fields
self.param_list = param_list
self.feat_dir = feat_dir
self.logger = logger
self.force_corr = force_corr
def go(self):
y_train = self.dfAll["relevance"].values[:TRAIN_SIZE]
for obs_field in self.obs_fields:
if obs_field not in self.dfAll.columns:
self.logger.info("Skip %s"%obs_field)
continue
obs_corpus = self.dfAll[obs_field].values
for target_field in self.target_fields:
if target_field not in self.dfAll.columns:
self.logger.info("Skip %s"%target_field)
continue
target_corpus = self.dfAll[target_field].values
ext = self.generator(obs_corpus, target_corpus, *self.param_list)
x = ext.transform()
if isinstance(ext.__name__(), list):
for i,feat_name in enumerate(ext.__name__()):
dim = 1
fname = "%s_%s_x_%s_%dD"%(feat_name, obs_field, target_field, dim)
pkl_utils._save(os.path.join(self.feat_dir, fname+config.FEAT_FILE_SUFFIX), x[:,i])
corr = np_utils._corr(x[:TRAIN_ |
cloudify-cosmo/cloudify-azure-plugin | examples/aks_service/scripts/store_kube_token.py | Python | apache-2.0 | 259 | 0 | impo | rt base64
from cloudify.state import ctx_parameters as inputs
from cloudify.manager import get_rest_client
client = get_rest_client()
client.secrets.cre | ate(
'kubernetes_token',
base64.b64decode(inputs['kube_token']),
update_if_exists=True)
|
christiano/pyArango | pyArango/index.py | Python | apache-2.0 | 1,257 | 0.043755 | import json
from theExceptions import (CreationError, DeletionError, UpdateError)
class Index(object) :
    def __init__(self, collection, infos = None, creationData = None) :
        """An index on a collection's fields. Indexes have a .infos dictionnary that stores all the infos about the index"""
        self.collection = collection
        self.connection = self.collection.database.connection
        # Base endpoint for index operations on this database.
        self.indexesURL = "%s/index" % self.collection.database.URL
        self.infos = None
        if infos :
            # Wrapping an already-existing index: trust the provided infos.
            self.infos = infos
        elif creationData :
            # Creating a new index on the server; fills self.infos on success.
            self._create(creationData)
        if self.infos :
            # Per-index endpoint, used by delete().
            self.URL = "%s/%s" % (self.indexesURL, self.infos["id"])

    def _create(self, postData) :
        """Creates an index of any type according to postData"""
        # No-op if infos is already set (index already exists/loaded).
        if self.infos is None :
            r = self.connection.session.post(self.indexesURL, params = {"collection" : self.collection.name}, data = json.dumps(postData))
            data = r.json()
            # ArangoDB reports failures both via HTTP status and an 'error' flag.
            if (r.status_code >= 400) or data['error'] :
                raise CreationError(data['errorMessage'], data)
            self.infos = data

    def delete(self) :
        """Delete the index"""
        r = self.connection.session.delete(self.URL)
        data = r.json()
        # 200/202 are the accepted success codes for deletions.
        if (r.status_code != 200 and r.status_code != 202) or data['error'] :
            raise DeletionError(data['errorMessage'], data)
airbnb/streamalert | streamalert_cli/terraform/alert_processor.py | Python | apache-2.0 | 2,636 | 0.002276 | """
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from streamalert.shared import ALERT_PROCESSOR_NAME
from streamalert_cli.terraform.common import infinitedict
from streamalert_cli.terraform.lambda_module import generate_lambda
def generate_alert_processor(config):
    """Generate Terraform for the Alert Processor

    Args:
        config (dict): The loaded config from the 'conf/' directory

    Returns:
        dict: Alert Processor dict to be marshaled to JSON
    """
    account = config['global']['account']
    prefix = account['prefix']
    outputs = config['outputs']

    # Lambda outputs may carry qualifiers (name:alias); IAM permissions
    # only need the bare function names.
    output_lambda_functions = [
        func.split(':')[0] for func in list(outputs.get('aws-lambda', {}).values())
    ]

    result = infinitedict()

    # IAM permissions module for the alert processor role
    result['module']['alert_processor_iam'] = {
        'source': './modules/tf_alert_processor_iam',
        'account_id': account['aws_account_id'],
        'region': account['region'],
        'prefix': prefix,
        'role_id': '${module.alert_processor_lambda.role_id}',
        'kms_key_arn': '${aws_kms_key.streamalert_secrets.arn}',
        'sse_kms_key_arn': '${aws_kms_key.server_side_encryption.arn}',
        'output_lambda_functions': output_lambda_functions,
        'output_s3_buckets': list(outputs.get('aws-s3', {}).values()),
        'output_sns_topics': list(outputs.get('aws-sns', {}).values()),
        'output_sqs_queues': list(outputs.get('aws-sqs', {}).values())
    }

    # Lambda module for the alert processor function itself
    result['module']['alert_processor_lambda'] = generate_lambda(
        '{}_streamalert_{}'.format(prefix, ALERT_PROCESSOR_NAME),
        'streamalert.alert_processor.main.handler',
        config['lambda']['alert_processor_config'],
        config,
        environment={
            'ALERTS_TABLE': '{}_streamalert_alerts'.format(prefix),
            'AWS_ACCOUNT_ID': account['aws_account_id'],
            'STREAMALERT_PREFIX': prefix
        }
    )
    return result
|
brandonmburroughs/food2vec | src/food2vec.py | Python | mit | 9,886 | 0.012341 | """Train a multi-class classification problem.
Use the embedding for each ingredient in a recipe to predict the rest
of the ingredients in the recipe.
Debug the model with a held-out validation set.
"""
import tensorflow as tf
import collections
import nomen
import numpy as np
layers = tf.contrib.layers
cfg = nomen.Config()
cfg.define_string('train_path', '../dat/recipes', 'training file')
cfg.define_string('save_path', '/tmp', 'save path')
cfg.define_integer('embedding_size', 100, 'size of the embeddings')
cfg.define_integer('epochs_to_train', 15, 'number of epochs to train')
cfg.define_float('learning_rate', 0.025, 'initial learning rate')
cfg.define_float('regularization', 0.01, 'regularization strength')
cfg.define_string('optimizer', 'adam', 'optimizer')
cfg.parse_args()
flatten = lambda l: [item for sublist in l for item in sublist]
def read_data(train_path):
    """Load the corpus: one whitespace-separated recipe (sentence) per line.

    Returns a list of token lists. The file is opened in binary mode, so
    tokens are bytes objects under Python 3 (matching the original reader).
    """
    with open(train_path, 'rb') as corpus_file:
        return [line.rstrip().split() for line in corpus_file]
def build_dataset(sentences, min_count=0):
    """Map every word to an integer id and encode the corpus.

    Id 0 is reserved for 'UNK' (words filtered out by min_count); the
    remaining ids are assigned in decreasing frequency order.

    Returns
    -------
    data : list of list of int
        The sentences with words replaced by their ids.
    count : list
        [['UNK', unk_count]] followed by (word, frequency) pairs.
    dictionary : dict
        word -> id.
    reverse_dictionary : dict
        id -> word.
    """
    word_freq = collections.Counter(
        word for sentence in sentences for word in sentence)
    count = [['UNK', -1]]
    count.extend(
        (word, freq)
        for word, freq in word_freq.most_common(len(word_freq))
        if freq > min_count)
    dictionary = {word: idx for idx, (word, _) in enumerate(count)}
    data = []
    unk_count = 0
    for sentence in sentences:
        encoded = []
        for word in sentence:
            if word in dictionary:
                encoded.append(dictionary[word])
            else:
                encoded.append(0)  # id of 'UNK'
                unk_count += 1
        data.append(encoded)
    count[0][1] = unk_count
    reverse_dictionary = {idx: word for word, idx in dictionary.items()}
    return data, count, dictionary, reverse_dictionary
def build_train_validation(data, validation_fraction=0.1):
    """Randomly split encoded sentences into train and validation sets.

    Validation sentences containing a word that never occurs in the
    training split are moved back into the training set, so every
    validation word is guaranteed to have a trained embedding.

    Uses numpy's global RNG; seed it for reproducible splits.

    Returns
    -------
    (train_data, vad_data) : tuple of lists of sentences.
    """
    vad_idx = np.random.choice(
        range(len(data)), int(validation_fraction * len(data)), replace=False)
    raw_vad_data = [data[i] for i in vad_idx]
    train_data = [data[i] for i in list(set(range(len(data))) - set(vad_idx))]
    # Vocabulary of the tentative training split (flattened word ids).
    train_counts = collections.Counter(
        word for sentence in train_data for word in sentence)
    vad_data = []
    for vad_sentence in raw_vad_data:
        if any(word not in train_counts for word in vad_sentence):
            # Contains an unseen word: keep it in training instead.
            train_data.append(vad_sentence)
        else:
            vad_data.append(vad_sentence)
    print('Split data into %d train and %d validation' %
          (len(train_data), len(vad_data)))
    # NOTE: the original version recomputed vad/train Counters here and
    # discarded the results (dead code); that wasted pass was removed.
    return train_data, vad_data
def generate_batch(data, corpus_size, count, subsample=1e-3):
    """Produce the (inputs, labels) batch for the next sentence.

    Advances the module-global cursor `sentence_index` and the global
    `words_processed` counter as side effects. Frequent words may be
    randomly dropped before the batch is built.
    """
    global sentence_index
    global words_processed
    raw_sentence = data[sentence_index]
    if subsample == 0.:
        # Subsampling disabled: use the sentence verbatim.
        sentence = raw_sentence
    else:
        # Randomly drop frequent words; keep_prob decreases with word
        # frequency (word2vec-style subsampling heuristic -- the exact
        # formula here mirrors that style; confirm against the reference
        # if it matters).
        sentence = []
        for word_id in raw_sentence:
            word_freq = count[word_id][1]
            keep_prob = ((np.sqrt(word_freq / (subsample * corpus_size)) + 1) *
                         (subsample * corpus_size) / word_freq)
            if np.random.rand() > keep_prob:
                pass
            else:
                sentence.append(word_id)
    # A batch needs at least 2 words (each word predicts the others);
    # fall back to the unsubsampled sentence otherwise.
    if len(sentence) < 2:
        sentence = raw_sentence
    sentence_index = (sentence_index + 1) % len(data)
    words_processed += len(sentence)
    return get_sentence_inputs(sentence, len(count))
def get_sentence_inputs(sentence, vocabulary_size):
    """Build skip-gram-like training arrays for one sentence.

    Each word is an input; its label row is the list of the *other*
    distinct words of the sentence.

    Parameters
    ----------
    sentence : list of int
        Word ids of one sentence.
    vocabulary_size : int
        Unused; kept for interface compatibility with existing callers
        (it was only referenced by commented-out one-hot code).

    Returns
    -------
    batch : int32 ndarray, shape (len(sentence),)
    labels : int32 ndarray, shape (len(sentence), n_distinct - 1)
        Note: rows must all have the same length, i.e. the sentence is
        expected to contain distinct words.
    """
    # Dead locals (batch_size, context_size) and large blocks of
    # commented-out code were removed from the original implementation.
    sentence_set = set(sentence)
    batch = np.asarray(sentence, dtype=np.int32)
    labels = np.asarray([list(sentence_set - {w}) for w in sentence],
                        dtype=np.int32)
    return batch, labels
def train():
raw_data = read_data(cfg['train_path'])
data, count, dictionary, reverse_dictionary = build_dataset(raw_data)
train_data, vad_data = build_train_validation(data)
vocabulary_size = len(dictionary)
words_per_epoch = len(flatten(train_data))
sentences_per_epoch = len(train_data)
del raw_data # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[0][:10], [reverse_dictionary[i] for i in data[0][:10]])
global sentence_index
global words_processed
sentence_index = 0
words_processed = 0
print('example batch: ')
batch, labels = generate_batch(data, words_per_epoch, count)
for i in range(len(batch)):
print(batch[i], reverse_dictionary[batch[i]],
'->', [w for w in labels[i]], [reverse_dictionary[w] for w in labels[i]])
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick words in the head of the distribution
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[None])
train_labels = tf.placeholder(tf.int32, shape=[None, None])
train_indicators = tf.one_hot(
train_labels, | depth=vocabulary_size, on_v | alue=1, off_value=0, axis=1)
print(train_indicators)
train_indicators = tf.to_float(tf.reduce_sum(train_indicators, -1))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, cfg['embedding_size']], -1.0, 1.0))
example_emb = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the softmaxloss
sm_w_t = tf.Variable(
tf.zeros([vocabulary_size, cfg['embedding_size']]))
# tf.truncated_normal([vocabulary_size, cfg['embedding_size']],
# stddev=1.0 / np.sqrt(cfg['embedding_size'])))
sm_b = tf.Variable(tf.zeros([vocabulary_size]))
# logits: [batch_size, vocab_size]
logits = tf.matmul(example_emb, sm_w_t, transpose_b=True) + sm_b
# Compute the average loss for the batch.
log_lik = tf.reduce_mean(-tf.nn.sigmoid_cross_entropy_with_logits(
logits=logits, targets=train_indicators))
regularizer_loss = (cfg['regularization'] * (
tf.nn.l2_loss(sm_w_t) + tf.nn.l2_loss(example_emb)))
loss = tf.reduce_mean(-log_lik) + regularizer_loss
# Construct the SGD optimizer using a decaying learning rate
words_processed_ph = tf.placeholder(tf.int32, [])
words_to_train = float(words_per_epoch * cfg['epochs_to_train'])
lr = cfg['learning_rate'] * tf.maximum(
0.0001, 1.0 - tf.cast(words_processed_ph, tf.float32) / words_to_train)
if cfg['optimizer'] == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(lr)
elif cfg['optimizer'] == 'adam':
optimizer = tf.train.AdamOptimizer(
lr, beta1=0.9, beta2=0.999, epsilon=1e-6)
train_op = optimizer.minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
num_steps = 1000001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
average_loss = 0.
sentences_to_train = cfg['epochs_to_train'] * len(data)
for step in range(num_steps):
if step < sentences_to_train:
batch_inputs, batch_labels = generate_batch(train_data, words_per_epoch, count)
feed_dict = {train_inputs: batch_inputs,
train_labels: batch_labels,
words_processed_ph: words_processed}
# We perform one update step by |
City-of-Helsinki/smbackend | services/models/__init__.py | Python | agpl-3.0 | 696 | 0 | from .accessibility_variable import AccessibilityVariable
from .department import Department
from .keyword import Keyword
from .notification import Announcement, ErrorMessage
from .service import Service, UnitServ | iceDetails
from .service_mapping import ServiceMapping
from .service_node import ServiceNode
from .unit import Unit
from .unit_accessibility_property import UnitAccessibilityProperty
from .unit_accessibility | _shortcomings import UnitAccessibilityShortcomings
from .unit_alias import UnitAlias
from .unit_connection import UnitConnection
from .unit_count import ServiceNodeUnitCount, ServiceUnitCount
from .unit_entrance import UnitEntrance
from .unit_identifier import UnitIdentifier
|
DerThorsten/nifty | src/python/examples/graph/plot_undirected_grid_graph_watersheds.py | Python | mit | 3,725 | 0.013423 | """
Edge/Node Weighted Watersheds
====================================

Compare edge weighted watersheds
and node weighted on a grid graph.
"""
####################################
# sphinx_gallery_thumbnail_number = 5
from __future__ import print_function

import nifty.graph
import skimage.data
import skimage.segmentation
import vigra
import matplotlib
import pylab
import numpy

# increase default figure size
a,b = pylab.rcParams['figure.figsize']
pylab.rcParams['figure.figsize'] = 2.0*a, 2.0*b

####################################
# load some image
img = skimage.data.astronaut().astype('float32')
shape = img.shape[0:2]

#plot the image (scaled to [0,1] for imshow)
pylab.imshow(img/255)
pylab.show()

################################################
# get some edge indicator: largest eigenvalue of
# the structure tensor responds to edges
taggedImg = vigra.taggedView(img,'xyc')
edgeStrength = vigra.filters.structureTensorEigenvalues(taggedImg, 1.5, 1.9)[:,:,0]
edgeStrength = edgeStrength.squeeze()
edgeStrength = numpy.array(edgeStrength)
pylab.imshow(edgeStrength)
pylab.show()

###################################################
# get seeds via local minima of the edge indicator;
# label 0 is background (non-seed)
seeds = vigra.analysis.localMinima(edgeStrength)
seeds = vigra.analysis.labelImageWithBackground(seeds)

# plot seeds with a random colormap (background forced to black)
cmap = numpy.random.rand ( seeds.max()+1,3)
cmap[0,:] = 0
cmap = matplotlib.colors.ListedColormap ( cmap)
pylab.imshow(seeds, cmap=cmap)
pylab.show()

#########################################
# grid graph: one node per pixel, 4-neighborhood
gridGraph = nifty.graph.undirectedGridGraph(shape)

#########################################
# run node weighted watershed algorithm
oversegNodeWeighted = nifty.graph.nodeWeightedWatershedsSegmentation(graph=gridGraph, seeds=seeds.ravel(),
                                                                     nodeWeights=edgeStrength.ravel())
oversegNodeWeighted = oversegNodeWeighted.reshape(shape)

#########################################
# run edge weighted watershed algorithm
# (edge weight = sum of the two endpoint pixel values)
gridGraphEdgeStrength = gridGraph.imageToEdgeMap(edgeStrength, mode='sum')
# NOTE(review): numpy.random.permutation returns a permuted *copy* and the
# result is discarded here, so this line has no effect -- confirm whether a
# shuffle of the edge weights was actually intended.
numpy.random.permutation(gridGraphEdgeStrength)
oversegEdgeWeightedA = nifty.graph.edgeWeightedWatershedsSegmentation(graph=gridGraph, seeds=seeds.ravel(),
                                                                      edgeWeights=gridGraphEdgeStrength)
oversegEdgeWeightedA = oversegEdgeWeightedA.reshape(shape)

#########################################
# run edge weighted watershed algorithm
# on interpixel weights.
# To do so we need to resample the image
# and compute the edge indicator
# on the reampled image (2*s-1 per axis puts a
# sample on every inter-pixel edge)
interpixelShape = [2*s-1 for s in shape]
imgBig = vigra.sampling.resize(taggedImg, interpixelShape)
edgeStrength = vigra.filters.structureTensorEigenvalues(imgBig, 2*1.5, 2*1.9)[:,:,0]
edgeStrength = edgeStrength.squeeze()
edgeStrength = numpy.array(edgeStrength)
gridGraphEdgeStrength = gridGraph.imageToEdgeMap(edgeStrength, mode='interpixel')
oversegEdgeWeightedB = nifty.graph.edgeWeightedWatershedsSegmentation(
    graph=gridGraph,
    seeds=seeds.ravel(),
    edgeWeights=gridGraphEdgeStrength)
oversegEdgeWeightedB = oversegEdgeWeightedB.reshape(shape)

#########################################
# plot results: overlay segment boundaries on the image
f = pylab.figure()
f.add_subplot(2,2, 1)
b_img = skimage.segmentation.mark_boundaries(img/255,
        oversegEdgeWeightedA.astype('uint32'), mode='inner', color=(0.1,0.1,0.2))
pylab.imshow(b_img)
pylab.title('Edge Weighted Watershed (sum weights)')

f.add_subplot(2,2, 2)
b_img = skimage.segmentation.mark_boundaries(img/255,
        oversegEdgeWeightedB.astype('uint32'), mode='inner', color=(0.1,0.1,0.2))
pylab.imshow(b_img)
pylab.title('Edge Weighted Watershed (interpixel weights)')

f.add_subplot(2,2, 3)
b_img = skimage.segmentation.mark_boundaries(img/255,
        oversegNodeWeighted.astype('uint32'), mode='inner', color=(0.1,0.1,0.2))
pylab.imshow(b_img)
pylab.title('Node Weighted Watershed')

pylab.show()
CORE-GATECH-GROUP/serpent-tools | docs/magicPlotDoc.py | Python | mit | 646 | 0 | """
Write out magic strings to magicPlotDocDecorator
"""
from os.path import join
from sys import version_info
import serpentTools
pyVersion = '{}.{}.{}'.format(*version_info[:3])
magicStrings = serpentTools.plot.PLOT_MAGIC_STRINGS
magicOpts = [
'#. ``{key}``: {value}'.format(key=key, value=magicStrings[key] | )
for key in sorted(magicStrings.keys())]
targetFile = join('develop', 'magicPlotOpts.rst')
print("Making magic plot conversion options | with \n python: {}"
"\n serpentTools: {}".format(pyVersion, serpentTools.__version__))
with open(targetFile, 'w') as target:
target.write('\n'.join(magicOpts))
print(' done')
|
sharkdata/sharkdata | sharkdata_core/string_utils.py | Python | mit | 3,194 | 0.001252 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Copyright (c) 2013-present SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
from fnmatch import fnmatch
def extract_pattern_values(
    string_to_parse,
    pattern_strings,
    pattern_var_start_sign="<",
    pattern_var_stop_sign=">",
):
    """Extract <key> placeholder values from a string matching a pattern.

    pattern_strings may be a single pattern or a list of patterns; with a
    list, the first matching pattern wins.

    Returns a dict {key: value} on success, or None when no pattern
    matches (including an empty pattern list -- the original raised
    UnboundLocalError in that case).
    """
    if isinstance(pattern_strings, list):
        # Fix: initialize so an empty list returns None instead of
        # referencing an unassigned local.
        identifier_values = None
        for pattern_string in pattern_strings:
            identifier_values = extract_pattern_values(
                string_to_parse,
                pattern_string,
                pattern_var_start_sign,
                pattern_var_stop_sign,
            )
            if identifier_values is not None:
                break
        return identifier_values
    else:
        pattern_string = pattern_strings
        keys = get_pattern_keys(
            pattern_string, pattern_var_start_sign, pattern_var_stop_sign
        )
        # Cheap glob-style pre-check before positional extraction.
        if not does_pattern_match(
            string_to_parse,
            pattern_string,
            keys,
            pattern_var_start_sign,
            pattern_var_stop_sign,
        ):
            return None
        identifier_values = {}
        file_name_parts = split_by_delimiters(
            pattern_string, delimiters=[pattern_var_start_sign, pattern_var_stop_sign]
        )
        file_name_parts = [
            part for part in file_name_parts if part != ""
        ]  # remove empty parts
        # Walk the pattern left to right; checked_part tracks how much of
        # string_to_parse has been consumed so far.
        checked_part = ""
        for index, part in enumerate(file_name_parts):
            start_pos = len(checked_part)
            if part in keys:
                if index == len(file_name_parts) - 1:
                    # Trailing placeholder swallows the rest of the string.
                    stop_pos = len(string_to_parse)
                else:
                    # Placeholder value ends where the next literal begins.
                    next_part = file_name_parts[index + 1]
                    stop_pos = string_to_parse.find(next_part, start_pos + 1)
                value_string = string_to_parse[start_pos:stop_pos]
                identifier_values[part] = value_string
                checked_part += value_string
            else:
                # Literal part must appear verbatim at the current position.
                if not string_to_parse[start_pos:].startswith(part):
                    return None
                checked_part += part
        return identifier_values
def does_pattern_match(
    string_to_parse,
    pattern_string,
    keys,
    pattern_var_start_sign="<",
    pattern_var_stop_sign=">",
):
    """Return whether string_to_parse matches pattern_string when every
    <key> placeholder is treated as a shell-style '*' wildcard."""
    glob_pattern = pattern_string
    for key in keys:
        placeholder = pattern_var_start_sign + key + pattern_var_stop_sign
        glob_pattern = glob_pattern.replace(placeholder, "*")
    return fnmatch(string_to_parse, glob_pattern)
def get_pattern_keys(
    pattern_string, pattern_var_start_sign="<", pattern_var_stop_sign=">"
):
    """Collect placeholder names from a pattern, in order of appearance.

    'a_<x>_<y>.txt' -> ['x', 'y'].  A chunk with two stop signs raises
    ValueError (two-element unpack), matching the original behavior.
    """
    found = []
    for chunk in pattern_string.split(pattern_var_start_sign):
        if pattern_var_stop_sign not in chunk:
            continue
        name, _ = chunk.split(pattern_var_stop_sign)
        found.append(name)
    return found
def split_by_delimiters(string_to_split, delimiters):
    """Split a string on each delimiter in turn, flattening as it goes.

    Note: with an empty delimiter list the result is [] -- not the
    original string -- which mirrors the original implementation.
    """
    fragments = []
    current = [string_to_split]
    for delimiter in delimiters:
        fragments = [
            piece for part in current for piece in part.split(delimiter)
        ]
        current = fragments
    return fragments
|
alerta/python-alerta | tests/integration/test_groups.py | Python | mit | 937 | 0.001067 | import unittest
from alertaclient.api import Client
class AlertTestCase(unittest.TestCase):
    """Integration tests for the group API; requires a live alerta server
    reachable at http://api:8080 with the demo API key."""
    def setUp(self):
        self.client = Client(endpoint='http://api:8080', key='demo-key')

    def test_group(self):
        # Create a group and verify its attributes round-trip.
        group = self.client.create_group(name='myGroup', text='test group')
        group_id = group.id
        self.assertEqual(group.name, 'myGroup')
        self.assertEqual(group.text, 'test group')
        # Update both fields and verify the changes took effect.
        group = self.client.update_group(group_id, name='newGroup', text='updated group text')
        self.assertEqual(group.name, 'newGroup')
        self.assertEqual(group.text, 'updated group text')
        # A second group brings the total to two.
        group = self.client.create_group(name='myGroup2', text='test group2')
        groups = self.client.get_users_groups()
        self.assertEqual(len(groups), 2, groups)
        # Deleting the first group leaves exactly one.
        self.client.delete_group(group_id)
        groups = self.client.get_users_groups()
        self.assertEqual(len(groups), 1)
|
freesmartphone/framework | framework/cxutil/ip.py | Python | gpl-2.0 | 2,408 | 0.039452 | """
Misc utils for IPv4 management
"""
# Copyright (c) 2008 Peter V. Saveliev
#
# This file is part of Connexion project.
#
# Connexion is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Connexion is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Connexion; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# msk[i] is the 32-bit netmask with the top i bits set:
# msk[0] == 0, msk[1] == 0x80000000, ..., msk[32] == 0xFFFFFFFF.
# Fix: use range() instead of the Python-2-only xrange() so the module
# imports on both Python 2 and 3 (range is iterable on both).
msk = []
for i in range(33):
    a = 0
    for k in range(i):
        a = a >> 1
        a |= 0x80000000
    msk.append(a)
def dqn_to_bit(st):
    """
    Convert dotted quad notation to /xx mask
    """
    mask_value = int(dqn_to_int(st))
    # Position in the precomputed mask table == number of leading 1-bits.
    return msk.index(mask_value)
def bit_to_dqn(st):
    """
    Convert /xx mask to dotted quad notation
    """
    bits = int(st)
    return int_to_dqn(msk[bits])
def dqn_to_int(st):
    """
    Convert dotted quad notation to integer
    """
    parts = st.split(".")
    # Concatenate the four octets as fixed-width hex, then parse the
    # whole 32-bit value in one go (indexing keeps any extra dotted
    # components silently ignored, as before).
    hex_repr = "%02x%02x%02x%02x" % (
        int(parts[0]), int(parts[1]), int(parts[2]), int(parts[3]))
    return int(hex_repr, 16)
def int_to_dqn(st):
    """
    Convert integer to dotted quad notation
    """
    # Render as 8 hex digits, then read the four 2-digit octets back.
    hex_repr = "%08x" % (st)
    octets = [int(hex_repr[pos:pos + 2], 16) for pos in (0, 2, 4, 6)]
    return "%i.%i.%i.%i" % tuple(octets)
def mask_unknown(st):
    """
    Detect mask by zero bytes.

    Walks the octets from least to most significant and records a
    shorter prefix length for every octet that is exactly "0"; the
    last recorded value (the most significant zero octet) wins.
    Returns 32 when no octet is zero.
    """
    octets = st.split(".")
    octets.reverse()
    mask = 32
    # Candidate prefix lengths; 32 is the fallback when nothing matches.
    candidates = [32]
    for octet in octets:
        mask -= 8
        if octet == "0":
            candidates.append(mask)
    return candidates[-1]
def get_mask(st):
    """
    Return the integer netmask for an IP given as "a.b.c.d[/mask]".

    The mask part may be a dotted quad or a prefix length; when absent,
    the mask is guessed from the trailing zero octets of the address.
    """
    parts = st.split("/")
    if len(parts) > 1:
        # Explicit mask: dotted quad if it contains a dot, else /xx.
        raw_mask = parts[1]
        if raw_mask.find(".") > 0:
            mask = dqn_to_int(raw_mask)
        else:
            mask = msk[int(raw_mask)]
    else:
        # No mask given: infer the prefix from zero octets.
        mask = msk[mask_unknown(parts[0])]
    return mask
def ip_range(st):
    """
    Return the IP list for a network given as "a.b.c.d[/mask]".

    Returns a list of (hex host-offset, dotted-quad address) tuples
    covering every host offset within the network.

    Replaces the py2-only ``xrange`` with ``range`` (valid on both
    Python 2 and 3) and drops the redundant ``start = 0`` variable.
    """
    mask = get_mask(st)
    addr = st.split("/")
    ip = dqn_to_int(addr[0])
    # Network base address and the largest host offset in the network.
    net = ip & mask
    stop = msk[32] & ~mask
    result = []
    for i in range(stop + 1):
        result.append((
            hex(i),
            int_to_dqn(net | i),
            ))
    return result
|
toobaz/pandas | setup.py | Python | bsd-3-clause | 28,584 | 0.000455 | #!/usr/bin/env python
"""
Parts of this file were taken from the pyzmq project
(https://github.com/zeromq/pyzmq) which have been permitted for use under the
BSD license. Parts are from lxml (https://github.com/lxml/lxml)
"""
import os
from os.path import join as pjoin
import pkg_resources
import platform
from distutils.sysconfig import get_config_vars
import sys
import shutil
from distutils.version import LooseVersion
from setuptools import setup, Command, find_packages
# versioning
import versioneer
cmdclass = versioneer.get_cmdclass()
def is_platform_windows():
    """Return True when running on Windows (native or Cygwin)."""
    return sys.platform in ("win32", "cygwin")
def is_platform_mac():
    """Return True when running on macOS."""
    current_platform = sys.platform
    return current_platform == "darwin"
min_numpy_ver = "1.13.3"
setuptools_kwargs = {
"install_requires": [
"python-dateutil >= 2.6.1",
"pytz >= 2017.2",
"numpy >= {numpy_ver}".format(numpy_ver=min_numpy_ver),
],
"setup_requires": ["numpy >= {numpy_ver}".format(numpy_ver=min_numpy_ver)],
"zip_safe": False,
}
min_cython_ver = "0.28.2"
try:
import Cython
ver = Cython.__version__
from Cython.Build import cythonize
_CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver)
except ImportError:
_CYTHON_INSTALLED = False
cythonize = lambda x, *args, **kwargs: x # dummy func
# The import of Extension must be after the import of Cython, otherwise
# we do not get the appropriately patched class.
# See https://cython.readthedocs.io/en/latest/src/reference/compilation.html
from distutils.extension import Extension # noqa:E402
from distutils.command.build import build # noqa:E402
try:
if not _CYTHON_INSTALLED:
raise ImportError("No supported version of Cython installed.")
from Cython.Distutils.old_build_ext import old_build_ext as _build_ext
cython = True
except ImportError:
from distutils.command.build_ext import build_ext as _build_ext
cython = False
else:
try:
try:
from Cython import Tempita as tempita
except ImportError:
import tempita
except ImportError:
raise ImportError("Building pandas requires Tempita: " "pip install Tempita")
_pxi_dep_template = {
"algos": [
"_libs/algos_common_helper.pxi.in",
"_libs/algos_take_helper.pxi.in",
"_libs/algos_rank_helper.pxi.in",
],
"groupby": ["_libs/groupby_helper.pxi.in"],
"hashtable": [
"_libs/hashtable_class_helper.pxi.in",
"_libs/hashtable_func_helper.pxi.in",
],
"index": ["_libs/index_class_helper.pxi.in"],
"sparse": ["_libs/sparse_op_helper.pxi.in"],
"interval": ["_libs/intervaltree.pxi.in"],
}
_pxifiles = []
_pxi_dep = {}
for module, files in _pxi_dep_template.items():
pxi_files = [pjoin("pandas", x) for x in files]
_pxifiles.extend(pxi_files)
_pxi_dep[module] = pxi_files
class build_ext(_build_ext):
    """Custom build_ext that renders Tempita .pxi.in templates before
    cythonizing and injects the NumPy include directory into every
    extension.  Relies on the module-level ``cython``, ``tempita`` and
    ``_pxifiles`` state set up during import.
    """

    @classmethod
    def render_templates(cls, pxifiles):
        """Render each .pxi.in Tempita template to its .pxi output,
        skipping templates whose output is already up to date."""
        for pxifile in pxifiles:
            # build pxifiles first, template extension must be .pxi.in
            assert pxifile.endswith(".pxi.in")
            outfile = pxifile[:-3]
            if (
                os.path.exists(outfile)
                and os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime
            ):
                # if .pxi.in is not updated, no need to output .pxi
                continue
            with open(pxifile, "r") as f:
                tmpl = f.read()
            pyxcontent = tempita.sub(tmpl)
            with open(outfile, "w") as f:
                f.write(pyxcontent)

    def build_extensions(self):
        """Render templates (when building from .pyx via Cython) and make
        sure every extension can see NumPy's C headers."""
        # if building from c files, don't need to
        # generate template output
        if cython:
            self.render_templates(_pxifiles)
        # NumPy headers are resolved at build time so NumPy does not need
        # to be importable when setup.py is merely parsed.
        numpy_incl = pkg_resources.resource_filename("numpy", "core/include")
        for ext in self.extensions:
            if hasattr(ext, "include_dirs") and numpy_incl not in ext.include_dirs:
                ext.include_dirs.append(numpy_incl)
        _build_ext.build_extensions(self)
DESCRIPTION = (
"Powerful data structures for data analysis, time series, " "and statistics"
)
LONG_DESCRIPTION = """
**pandas** is a Python package providing fast, flexible, and expressive data
structures designed to make working with structured (tabular, multidimensional,
potentially heterogeneous) and time series data both easy and intuitive. It
aims to be the fundamental high-level building block for doing practical,
**real world** data analysis in Python. Additionally, it has the broader goal
of becoming **the most powerful and flexible open source data analysis /
manipulation tool available in any language**. It is already well on its way
toward this goal.
pandas is well suited for many different kinds of data:
- Tabular data with heterogeneously-typed columns, as in an SQL table or
Excel spreadsheet
- Ordered and unordered (not necessarily fixed-frequency) time series data.
- Arbitrary matrix data (homogeneously typed or heterogeneous) with row and
column labels
- Any other form of observational / statistical data sets. The data actually
need not be labeled at all to be placed into a pandas data structure
The two primary data structures of pandas, Series (1-dimensional) and DataFrame
(2-dimensional), handle the vast majority of typical use cases in finance,
statistics, social science, and many areas of engineering. For R users,
DataFrame provides everything that R's ``data.frame`` provides and much
more. pandas is built on top of `NumPy <http://www.numpy.org>`__ and is
intended to integrate well within a scientific computing environment with many
other 3rd party libraries.
Here are just a few of the things that pandas does well:
- Easy handling of **missing data** (represented as NaN) in floating point as
well as non-floating point data
- Size mutability: columns can be **inserted and deleted** from DataFrame and
higher dimensional objects
- Automatic and explicit **data alignment**: objects can be explicitly
aligned to a set of labels, or the user can simply ignore the labels and
let `Series`, `DataFrame`, etc. automatically align the data for you in
computations
- Powerful, flexible **group by** functionality to perform
split-apply-combine operations on data sets, for both aggregating and
transforming data
- Make it **easy to convert** ragged, differently-indexed data in other
Python and NumPy data structures into DataFrame objects
- Intelligent label-based **slicing**, **fancy indexing**, and **subsetting**
of large data sets
- Intuitive **merging** and **joining** data sets
- Flexible **reshaping** and pivoting of data sets
- **Hierarchical** labeling of axes (possible to have multiple labels per
tick)
- Robust IO tools for loading data from **flat files** (CSV and delimited),
Excel files, databases, and saving / loading data from the ultrafast **HDF5
format**
- **Time series**-specific functionality: date range generation and frequency
conversion, moving window statistics, moving window linear regressions,
date shifting and lagging, etc.
Many of these principles are here to address the shortcomings frequently
experienced using other languages / scientific research environments. For data
scientists, working with data is typically divided into multiple stages:
munging and cleaning data, analyzing / modeling it, then organizing the results
of the analysis into a form suitable for plotting or tabular display. pandas is
the ideal tool for all of these tasks.
"""
DISTNAME = "pandas"
LICENSE = "BSD"
AUTHOR = "The PyData Development Team"
EMAIL = "pydata@googlegroups.com"
URL = "http://pandas.pydata.org"
DOWNLOAD_URL = ""
PROJECT_URLS = {
"Bug Tracker": "https://github.com/pandas-dev/pandas/issues",
"Documentation": "http://pandas.pydata.org/pandas-docs/stable/",
"Source Code": "https://github.com/pandas-dev/pandas",
}
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"Programming Language :: Python",
" |
Dronolab/antenna-tracking | Sensors/imuAbstract.py | Python | mit | 1,320 | 0.00303 | import os
import sys
import RTIMU
import schedule
import GeneralSettings
from Utility.abstract_process import processAbstract
import time
class imuHandler(processAbstract):
    """Process wrapper around an RTIMULib IMU.

    Initialises the sensor from ``SETTINGS_FILE`` (which a subclass must
    define) and then repeatedly runs :meth:`job` until the kill pill is
    set.  Reconstructs two garbled source lines (``self.SETTINGS_FILE``
    argument and the ``setSlerpPower`` call) and replaces the broken bare
    ``raise`` in the abstract :meth:`job` with ``NotImplementedError``.
    """

    #SETTINGS_FILE = GeneralSettings.IMU_SETTINGS_FILE

    def __init__(self, antenna_data):
        processAbstract.__init__(self)
        # Shared structure the concrete job() implementations update.
        self.antenna_data = antenna_data
        self.ready = False
        self.first = True

    def job(self):
        """Abstract: perform one polling step; subclasses must override."""
        # A bare `raise` with no active exception is itself an error
        # (RuntimeError/TypeError); raise the intended abstract marker.
        raise NotImplementedError("job() must be implemented by a subclass")

    def process(self):
        """Initialise the IMU, then poll job() roughly every 10 ms until killed."""
        print("Using settings file " + self.SETTINGS_FILE + ".ini")
        #print("Settings file does not exist, will be created")
        # if not os.path.exists(self.SETTINGS_FILE + ".ini"):
        s = RTIMU.Settings(self.SETTINGS_FILE)
        self.imu = RTIMU.RTIMU(s)
        if (not self.imu.IMUInit()):
            print("IMU Init Failed", self.SETTINGS_FILE)
            sys.exit(1)
        else:
            print("IMU Init Succeeded")
            self.ready = True
        # initialising fusion parameters
        self.imu.setSlerpPower(0.02)
        self.imu.setGyroEnable(True)
        self.imu.setAccelEnable(True)
        self.imu.setCompassEnable(True)
        self.poll_interval = self.imu.IMUGetPollInterval()
        while self.kill_pill.empty():
            self.job()
            # 10 ms between polls (assumes Python 3 true division;
            # under Python 2 this evaluates to sleep(0) -- TODO confirm
            # the intended interpreter).
            time.sleep(10 / 1000)
|
abalakh/robottelo | tests/foreman/ui/test_login.py | Python | gpl-3.0 | 1,168 | 0 | # -*- encoding: utf-8 -*-
"""Test class for Login UI"""
from ddt import ddt
from robottelo.decorators import data
from robottelo.helpers import gen_string
from robottelo.test import UITestCase
@ddt
class Login(UITestCase):
    """Implements the login tests from UI"""

    def test_successful_login(self):
        """@Test: Login as an admin user

        @Feature: Login - Positive

        @Assert: Successfully logged in as an admin user

        """
        self.login.login(self.katello_user, self.katello_passwd)
        self.assertTrue(self.login.is_logged())

    @data(
        {u'login': 'admin', u'pass': ''},
        {u'login': '', u'pass': 'mypassword'},
        {u'login': '', u'pass': ''},
        {u'login': gen_string('alpha', 300), u'pass': ''},
        {u'login': gen_string('alpha', 300),
         u'pass': gen_string('alpha', 300)},
    )
    def test_failed_login(self, test_data):
        """@Test: Login into application using invalid credentials

        @Feature: Login - Negative

        @Assert: Fails to login

        """
        self.login.login(test_data['login'], test_data['pass'])
        self.assertFalse(self.login.is_logged())
|
TheManaWorld-Ger/server-data | tools/showvars.py | Python | gpl-2.0 | 2,535 | 0.035108 | #!/usr/bin/python
# must be started in the npc dir
import os
import re
from optparse import OptionParser
# Command line options: -v prints where each variable occurs, -f limits
# the scan to a single file, -l also reports @-prefixed local variables.
parser = OptionParser()
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False,
                  help="show the occurrences of that var")
parser.add_option("-f", "--file", dest="fname", default="",
                  help="inspect that file", metavar="FILE")
parser.add_option("-l", "--localvariables", dest="localvars", action="store_true", default=False,
                  help="show local variables as well")
(options, args) = parser.parse_args()
def handleFile(fname):
    """Scan one script file for `set <var>, <value>;` commands.

    Every non-zero assignment target is recorded in the module-global
    `allvars` dict (variable name -> list of files it occurs in).
    Returns the list of variables assigned "0"; in practice this is
    only populated for ./functions/clear_vars.txt, so the result is
    the set of variables that script clears.
    """
    f = open(fname)
    lines = f.readlines();
    f.close()
    rm=[]
    for l in lines:
        # remove comments
        line = l.split(r"//")[0]
        sp = line.split()
        # no set command?
        if not "set" in sp:
            continue
        # ignore those lines printing messages
        if 'mes "' in line:
            continue
        # ignore anything before the "set" command:
        sp = sp[sp.index("set")+1:]
        line = "".join(sp)
        endpos = line.find(",")
        # check for comma
        if endpos>0:
            # ok its a oneliner, the comma is in the same line:
            varname = line[0:endpos].strip()
            assignment = line[endpos+1:].strip()[:-1] # remove semicolon
            if assignment != "0":
                # @vars are script-local: only tracked with -l;
                # $vars are global/server variables and always skipped.
                if varname.startswith("@") and not options.localvars:
                    continue
                if varname.startswith("$"):
                    continue
                if varname in allvars:
                    if not fname in allvars[varname]:
                        allvars[varname] += [fname]
                else:
                    allvars[varname] = [fname]
            else:
                #print fname
                # assignments to 0 in clear_vars.txt mark cleared vars
                if fname == "." + os.sep + "functions" + os.sep + "clear_vars.txt":
                    rm += [varname]
        else:
            # ok error, you need to check manually:
            print "\tline:\t",line
    return rm
# Walk the npc directory (or the single file/dir given with -f), collect
# variable usage into allvars, then report which variables are covered by
# clear_vars.txt and which are not.
if options.fname:
    path=options.fname
else:
    path=".."+os.sep+"npc"
allvars = {}
rmvars = []
print "please check manully for vars in here:"
os.chdir(path)
for tpl in os.walk("."):
    for fname in tpl[2]:
        rmvars += handleFile(tpl[0]+os.sep+fname)
unusedcounter=0
usedcounter=0
# First group: variables that appear in scripts AND are reset by clear_vars.
print "These variables are found in the scripts, which are deleted in clear_vars"
for var in allvars:
    if not var in rmvars:
        continue
    unusedcounter+=1
    print "\t",var
    if options.verbose:
        for fname in allvars[var]:
            print "\t","\t", fname
# Second group: variables used by scripts but never cleared.
print "These variables are valid variables of the scripts:"
for var in allvars:
    if var in rmvars:
        continue
    usedcounter+=1
    print "\t",var
    if options.verbose:
        for fname in allvars[var]:
            print "\t","\t", fname
print "number of vars used:", usedcounter
print "number of vars cleared:", unusedcounter
rrahn/gdf_tools | include/seqan/apps/tree_recon/tests/run_tests.py | Python | gpl-3.0 | 3,621 | 0.001657 | #!/usr/bin/env python
"""Execute the tests for the tree_recomb program.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import os.path
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
"""Main entry point of the script."""
print 'Executing test for tree_recomb'
print '=============================='
print
ph = app_tests.TestPathHelper(
source_base, binary_base,
'apps/tree_recon/tests') # tests dir
# ============================================================
# Auto-detect the binary path.
# ============================================================
path_to_program = app_tests.autolocateBinary(
binary_base, 'apps/tree_recon', 'tree_recon')
# ============================================================
# Built TestConf list.
# ============================================================
# Build list with TestConf objects, analoguely to how the outpu | t
# was generated in generate_outputs.sh.
conf_list = []
for i in [1, 2, 3]:
conf = app_tests.TestConf(
program=path_to_program,
args= | ['-m', ph.inFile('example%d.dist' % i),
'-o', ph.outFile('example%d.dot' % i)],
to_diff=[(ph.inFile('example%d.dot' % i),
ph.outFile('example%d.dot' % i))])
conf_list.append(conf)
for i in [1, 2, 3]:
for b in ['nj', 'min', 'max', 'avg', 'wavg']:
if i == 1 and b == 'avg':
continue # Skip, rounding problems MSVC vs GCC.
conf = app_tests.TestConf(
program=path_to_program,
args=['-b', b,
'-m', ph.inFile('example%d.dist' % i),
'-o', ph.outFile('example%d.%s.dot' % (i, b))],
to_diff=[(ph.inFile('example%d.%s.dot' % (i, b)),
ph.outFile('example%d.%s.dot' % (i, b)))])
conf_list.append(conf)
for i in [1, 2, 3]:
for f in ['dot', 'newick']:
conf = app_tests.TestConf(
program=path_to_program,
args=['-m', ph.inFile('example%d.dist' % i),
'-o', ph.outFile('example%d.%s' % (i, f))],
to_diff=[(ph.inFile('example%d.%s' % (i, f)),
ph.outFile('example%d.%s' % (i, f)))])
conf_list.append(conf)
# Execute the tests.
failures = 0
for conf in conf_list:
res = app_tests.runTest(conf)
# Output to the user.
print ' '.join(['tree_recomb'] + conf.args),
if res:
print 'OK'
else:
failures += 1
print 'FAILED'
# Cleanup.
ph.deleteTempDir()
print '=============================='
print ' total tests: %d' % len(conf_list)
print ' failed tests: %d' % failures
print 'successful tests: %d' % (len(conf_list) - failures)
print '=============================='
# Compute and return return code.
return failures != 0
if __name__ == '__main__':
sys.exit(app_tests.main(main))
|
ealogar/curso-python | advanced/fib_fac.py | Python | apache-2.0 | 1,159 | 0.004314 | #-*- coding: utf-8 -*-
def factorial(n):
    """Return the factorial of n.

    Iterative instead of recursive so large n cannot hit the interpreter
    recursion limit; like the original, any n < 2 (including negatives)
    yields 1.
    """
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
def fibonacci(n):
    """Return the nth fibonacci number.

    Iterative O(n) replacement for the original O(2^n) double recursion;
    behaviour is preserved, including returning n unchanged for n < 2.
    """
    if n < 2:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def fib_fac(x=30, y=900 | ):
fib = fibonacci(x)
fac = factorial(y)
print "fibonacci({}):".format(x), fib
print "factorial({}):".format(y), fac
if __name__ == "__main__":
def opc1():
fruits = tuple(str(i) for i in xrange(100))
out = ''
for fruit in fruits:
out += fruit +':'
return out
def opc2():
format_str = '%s:' * 100
fruits = tuple(str | (i) for i in xrange(100))
out = format_str % fruits
return out
def opc3():
format_str = '{}:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str.format(*fruits)
return out
def opc4():
fruits = tuple(str(i) for i in xrange(100))
out = ':'.join(fruits)
return out
import timeit
print timeit.timeit(stmt=opc4, number=100)
fib_fac()
|
CoinAge-DAO/solari | qa/rpc-tests/keypool.py | Python | mit | 4,289 | 0.006528 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core Developers
# Copyright (c) 2015 Solarminx
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the wallet keypool, and interaction with wallet encryption/locking
# Add python-solarirpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-solarirpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from solarirpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.  Raises AssertionError when a matched object violates an
    expected pair, or when nothing matched at all.
    """
    matches = 0
    for entry in object_array:
        # Skip entries that fail any of the to_match criteria.
        if any(entry[key] != value for key, value in to_match.items()):
            continue
        # Matching entry: every expected key/value must hold exactly.
        for key, value in expected.items():
            if entry[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(entry), str(key), str(value)))
        matches += 1
    if matches == 0:
        raise AssertionError("No objects matched %s" % (str(to_match)))
def run_test(nodes, tmpdir):
    """Exercise the wallet keypool against node 0.

    Encrypts the wallet (which restarts the daemon), verifies the keypool
    exhausts after one key while locked, refills it with three keys, and
    checks exactly those three (plus the pre-drawn one) can be drawn
    before RPC error -12 (keypool ran out) is raised again.
    Note: Python 2 `except X,e:` syntax -- this file targets Python 2.
    """
    # Encrypt wallet and wait to terminate
    nodes[0].encryptwallet('test')
    solarid_processes[0].wait()
    # Restart node 0
    nodes[0] = start_node(0, tmpdir)
    # Keep creating keys
    addr = nodes[0].getnewaddress()
    try:
        addr = nodes[0].getnewaddress()
        raise AssertionError('Keypool should be exhausted after one address')
    except JSONRPCException,e:
        # -12 == "Keypool ran out" RPC error code
        assert(e.error['code']==-12)
    # put three new keys in the keypool
    nodes[0].walletpassphrase('test', 12000)
    nodes[0].keypoolrefill(3)
    nodes[0].walletlock()
    # drain the keys
    addr = set()
    addr.add(nodes[0].getrawchangeaddress())
    addr.add(nodes[0].getrawchangeaddress())
    addr.add(nodes[0].getrawchangeaddress())
    addr.add(nodes[0].getrawchangeaddress())
    # assert that four unique addresses were returned
    assert(len(addr) == 4)
    # the next one should fail
    try:
        addr = nodes[0].getrawchangeaddress()
        raise AssertionError('Keypool should be exhausted after three addresses')
    except JSONRPCException,e:
        assert(e.error['code']==-12)
def main():
    """Parse options, set up a test datadir and node, and run the keypool
    test; exits 0 on success, 1 on any failure.

    Reconstructs two garbled source lines: the optparse usage string
    ("%prog [options]") and the tempfile.mkdtemp default for --tmpdir.
    """
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                      help="Leave solarids and test.* datadir on exit or error")
    parser.add_option("--srcdir", dest="srcdir", default="../../src",
                      help="Source directory containing solarid/solari-cli (default: %default%)")
    parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                      help="Root directory for datadirs")
    (options, args) = parser.parse_args()

    # Make the freshly built binaries visible to the helper functions.
    os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']

    check_json_precision()

    success = False
    nodes = []
    try:
        print("Initializing test directory "+options.tmpdir)
        if not os.path.isdir(options.tmpdir):
            os.makedirs(options.tmpdir)
        initialize_chain(options.tmpdir)

        nodes = start_nodes(1, options.tmpdir)

        run_test(nodes, options.tmpdir)

        success = True

    except AssertionError as e:
        print("Assertion failed: "+e.message)
    except JSONRPCException as e:
        print("JSONRPC error: "+e.error['message'])
        traceback.print_tb(sys.exc_info()[2])
    except Exception as e:
        print("Unexpected exception caught during testing: "+str(sys.exc_info()[0]))
        traceback.print_tb(sys.exc_info()[2])

    if not options.nocleanup:
        print("Cleaning up")
        stop_nodes(nodes)
        wait_solarids()
        shutil.rmtree(options.tmpdir)

    if success:
        print("Tests successful")
        sys.exit(0)
    else:
        print("Failed")
        sys.exit(1)
|
justinh5/CipherBox | Arithmetic/__init__.py | Python | mit | 202 | 0 | from Ar | ithmetic.Numbers.NumberArith import Arithmetic
from Arithmetic.Numbers.Modulo import Modulus
from Arithmetic.Numbers.Primality.Primality import Prime
__all__ = ["Arithmetic", "Modulu | s", "Prime"]
|
antoinecarme/sklearn2sql_heroku | tests/classification/FourClass_10/ws_FourClass_10_XGBClassifier_db2_code_gen.py | Python | bsd-3-clause | 138 | 0.014493 | f | rom sklearn2sql_heroku.tests.classification import generic as class_ | gen
class_gen.test_model("XGBClassifier" , "FourClass_10" , "db2")
|
ToonTownInfiniteRepo/ToontownInfinite | toontown/coghq/DistributedBattleFactory.py | Python | mit | 2,002 | 0.001998 | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.battle.BattleBase import *
from toontown.coghq import DistributedLevelBattle
from direct.directnotify import DirectNotifyGlobal
from toontown.toon import TTEmote
from otp.avatar import Emote
from toontown.battle import SuitBattleGlobals
import random
from toontown.suit import SuitDNA
from direct.fsm import State
from direct.fsm import ClassicFSM, State
from toontown.toonbase import ToontownGlobals
from otp.nametag import NametagGlobals
class DistributedBattleFactory(DistributedLevelBattle.DistributedLevelBattle):
    """Client-side factory battle.

    Extends the level-battle FSM with a 'FactoryReward' state (reachable
    from 'Off' and 'PlayMovie') that plays the reward movie when the
    battle ends.  Reconstructs two garbled source lines
    (``getStateNamed('PlayMovie')`` and ``self.bossBattle``).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBattleFactory')

    def __init__(self, cr):
        DistributedLevelBattle.DistributedLevelBattle.__init__(self, cr)
        # FactoryReward transitions back to Resume when the movie is done.
        self.fsm.addState(State.State('FactoryReward', self.enterFactoryReward, self.exitFactoryReward, ['Resume']))
        offState = self.fsm.getStateNamed('Off')
        offState.addTransition('FactoryReward')
        playMovieState = self.fsm.getStateNamed('PlayMovie')
        playMovieState.addTransition('FactoryReward')

    def enterFactoryReward(self, ts):
        """Start the reward movie; suppress nametag arrows while it plays."""
        self.notify.info('enterFactoryReward()')
        self.disableCollision()
        self.delayDeleteMembers()
        if self.hasLocalToon():
            NametagGlobals.setMasterArrowsOn(0)
            if self.bossBattle:
                messenger.send('localToonConfrontedForeman')
        self.movie.playReward(ts, self.uniqueName('building-reward'), self.__handleFactoryRewardDone)

    def __handleFactoryRewardDone(self):
        """Movie finished: tell the AI and resume the level."""
        self.notify.info('Factory reward done')
        if self.hasLocalToon():
            self.d_rewardDone(base.localAvatar.doId)
        self.movie.resetReward()
        self.fsm.request('Resume')

    def exitFactoryReward(self):
        """Tear down the reward movie and restore nametag arrows."""
        self.notify.info('exitFactoryReward()')
        self.movie.resetReward(finish=1)
        self._removeMembersKeep()
        NametagGlobals.setMasterArrowsOn(1)
|
nonemaw/MATRIX_01 | COMP9041/ass1/examples/0/ls.py | Python | gpl-2.0 | 82 | 0 | #!/usr/bin/python2.7 -u
import subprocess
# List /dev/null via ls; argument-list form avoids shell interpretation.
# The exit status is discarded.  (Line reconstructed from garbled source.)
subprocess.call(['ls', '/dev/null'])
|
apple/coremltools | coremltools/converters/mil/mil/var.py | Python | bsd-3-clause | 8,592 | 0.000815 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil.types import builtin_to_string
from coremltools.converters.mil.mil.types.symbolic import any_symbolic
class Var(object):
    """
    Var represents the outputs of an Operation. Most Vars are derived from an
    Operation (including const), and all Vars must have `sym_type`.

    Example Usage:

    from coremltools.converters.mil.mil import (
        Builder as mb,
        Function,
        types
    )

    func_inputs = {"a": mb.placeholder(shape=(1,2)),
                   "b": mb.placeholder(shape=(1,2)) }
    with Function(func_inputs) as ssa_func:
        a, b = ssa_func.inputs["a"], ssa_func.inputs["b"]
        res = mb.add(x=a, y=b) # res is Var
        assert types.is_tensor(res.sym_type)
        assert res.rank == 2
        assert res.dtype == types.float # since a, b are by default float

        # value is not available at compile time in this case. If
        # materializable, res.val would be a numpy / primitive value
        assert res.val is None

    Comment: Except InternalVar and Vars created in while_loop and by
    placeholder, all Var should only be constructed by Operation to represent
    outputs.

    Comment: Var hides the details of sym_type vs sym_val vs materialized
    value, which was represented by 2 objects prior to refactoring.


    # Properties:

    name: (str)
        name in MIL proto NamedValueType. Name is assigned by the parent
        Operation.

    sym_type [_sym_type]: (builtin type class)
        All Var must have a (possibly symbolic) type, usually derived from
        type inference of upstream ops or from default values in _Input.

    sym_val [_sym_val]: (builtin type instance)
        Possibly symbolic value.

    val [_sym_val]: (np.ndarray or python primitive scalar)
        Numpy (scalar / tensor) value. `val` is not None iff `sym_val` is
        not None and does not contain symbols.  Read-only.

    op [_op]: (Operation)
        The Operation this Var is derived from. May not be None except
        for InternalVar. Read-only.

    op_output_idx: (int)
        Idx of the output from Operation corresponding to _Input.  May be
        None.

    child_ops [_child_ops]: list[Operation]
        Ops that take this Var as an input.
    """

    __slots__ = [
        "name",
        "_sym_type",
        "_sym_val",
        "_op",
        "op_output_idx",
        "_child_ops",
        "consuming_blocks",
    ]

    def __init__(self, name, sym_type, sym_val=None, op=None, op_output_idx=None):
        """
        sym_type (builtin type)
        sym_val (builtin value)
        op (Operation)
        op_output_idx (int)
        """
        self.name = name
        self._sym_type = sym_type
        self._sym_val = sym_val
        self._op = op
        self.op_output_idx = op_output_idx
        # An op can appear twice if it consumes a var twice (e.g.,
        # add(%1, %1), while_loop(loop_vars=(%1, %1)).
        self._child_ops = list()
        # A variable may not be consumed by any op (i.e. len(self._child_ops)
        # == 0) but is still used as block output. A var can be output of
        # multiple blocks (e.g., both current block and nested blocks)
        self.consuming_blocks = list()

    @property
    def sym_type(self):
        return self._sym_type

    @property
    def shape(self):
        # Non-tensor (scalar) vars report an empty shape.
        if types.is_tensor(self._sym_type):
            return self._sym_type.get_shape()
        return tuple()

    @property
    def rank(self):
        return len(self.shape)

    @property
    def dtype(self):
        # For scalars the symbolic type itself is the primitive type.
        if types.is_tensor(self._sym_type):
            return self._sym_type.get_primitive()
        return self._sym_type

    @property
    def sym_val(self):
        if self._sym_val is None:
            return None
        return self._sym_val.val

    @property
    def val(self):
        # Materialized value only: None when symbolic or unavailable.
        if self._sym_val is None or any_symbolic(self._sym_val.val):
            return None
        return self._sym_val.val

    @property
    def op(self):
        return self._op

    @property
    def child_ops(self):
        return self._child_ops

    def add_child_op(self, new_op):
        """Record that new_op consumes this Var as an input."""
        self._child_ops.append(new_op)

    def remove_child_op(self, target_op, no_check=False):
        """Remove target_op from child_ops; raise unless no_check is set."""
        if target_op not in self._child_ops:
            if no_check:
                return # no-op
            msg = "Op {} does not takes Var {} as input"
            raise ValueError(msg.format(target_op.name, self.name))
        self._child_ops.remove(target_op)

    def shape_str(self):
        """Shape rendered as e.g. "(2, 3, fp32)*"; '*' marks a concrete
        value, '^' a symbolic one."""
        annotation = ""
        if self.val is not None:
            annotation = "*"
        elif self.sym_val is not None:
            annotation = "^"
        shape_str = str(self.shape)[:-1]  # trim the ")"
        if self.rank > 1:
            shape_str += ", "
        if types.builtin_to_string(self.dtype) is None:
            shape_str += ")" + annotation
        else:
            shape_str += types.builtin_to_string(self.dtype) + ")" + annotation
        return shape_str

    def type_str(self):
        """Coarse type category: "(Tensor)", "(List)" or "(Scalar)"."""
        is_tensor = types.is_tensor(self.sym_type)
        is_list = types.is_list(self.sym_type)
        if is_tensor:
            type_string = "(Tensor)"
        elif is_list:
            type_string = "(List)"
        else:
            type_string = "(Scalar)"
        return type_string

    def set_name(self, name):
        self.name = name

    def is_tensor_or_scalar_of(self, dtype: str):
        """True when this var is a tensor/scalar whose dtype's string name
        equals `dtype` (e.g. "fp32")."""
        return (types.is_tensor(self.sym_type) or types.is_scalar(self.sym_type)) and builtin_to_string(self.dtype) == dtype

    def __str__(self):
        return "%" + self.name + ": " + self.shape_str() + self.type_str()
class ListVar(Var):
    """Var whose symbolic type is a (possibly dynamically sized) list of
    tensors.  Tensor-only properties (shape/rank/dtype) are explicitly
    disabled and raise ValueError."""

    __slots__ = ["_elem_type", "init_length", "dynamic_length"]

    def __init__(
        self, name, elem_type=None, init_length=None, dynamic_length=True, sym_val=None, **kwargs
    ):
        """
        elem_type (builtin.tensor)

        init_length (int): initial length

        dynamic_length (bool): True to allow list to grow. False uses
        init_length as the fixed size (init_length is runtime length).

        sym_val: value of the list, if available
        """
        super(ListVar, self).__init__(
            name=name,
            sym_type=types.list(elem_type, init_length, dynamic_length),
            sym_val=sym_val,
            **kwargs
        )
        self._elem_type = elem_type
        self.init_length = init_length
        self.dynamic_length = dynamic_length

    @property
    def shape(self):
        # Lists have no tensor shape; fail loudly instead of returning ().
        raise ValueError("shape not applicable to ListVar '{}'.".format(self.name))

    @property
    def rank(self):
        raise ValueError("rank not applicable to ListVar '{}'".format(self.name))

    @property
    def dtype(self):
        raise ValueError("dtype not applicable to ListVar '{}'".format(self.name))

    @property
    def elem_type(self):
        return self._elem_type

    @property
    def elem_shape(self):
        # Unknown element type means the element shape is unknown too.
        if self._elem_type == types.unknown:
            return None
        return self._elem_type.get_shape()

    def shape_str(self):
        """Render as e.g. "List[?, (2, 3, fp32)]"; '?' marks dynamic length."""
        length = "?"
        if not self.dynamic_length:
            length = str(self.init_length)
        if self._elem_type == types.unknown:
            return "List[{}, unknown]".format(length)
        if self._elem_type == types.str:
            return "List[{}, str]".format(length)
        elif self._elem_type == types.int64:
            return "List[{}, int]".format(length)
        else:
            elem_shape = self._elem_type.get_shape()
            elem_dtype = self._elem_type.get_primitive()
            shape_str = str(elem_shape)[:-1]  # trim the ")"
            if len(elem_shape) > 1:
                shape_str += ", "
            shape_str += types.builtin_to_string(elem_dtype) + ")"
            return "List[{}, {}]".format(length, shape_str)
class InternalVar(Var):
"""
Internal Var (with '__' |
ets-labs/python-dependency-injector | tests/unit/providers/traversal/test_method_caller_py3.py | Python | bsd-3-clause | 1,790 | 0 | """MethodCaller provider traversal tests."""
from dependency_injector import providers
def test_traverse():
    """Traversing a method-caller provider yields the whole provider chain."""
    base = providers.Provider()
    provided_attr = base.provided
    bound_method = provided_attr.method
    caller = bound_method.call()

    discovered = list(caller.traverse())

    assert len(discovered) == 3
    assert base in discovered
    assert provided_attr in discovered
    assert bound_method in discovered
def test_traverse_args():
    """Positional call arguments that are providers are traversed too.

    (Final assertion reconstructed from a garbled source line.)
    """
    provider1 = providers.Provider()
    provided = provider1.provided
    method = provided.method
    provider2 = providers.Provider()
    provider = method.call("foo", provider2)

    all_providers = list(provider.traverse())

    assert len(all_providers) == 4
    assert provider1 in all_providers
    assert provider2 in all_providers
    assert provided in all_providers
    assert method in all_providers
def test_traverse_kwargs():
    """Keyword call arguments that are providers are traversed too.

    (def line reconstructed from a garbled source line.)
    """
    provider1 = providers.Provider()
    provided = provider1.provided
    method = provided.method
    provider2 = providers.Provider()
    provider = method.call(foo="foo", bar=provider2)

    all_providers = list(provider.traverse())

    assert len(all_providers) == 4
    assert provider1 in all_providers
    assert provider2 in all_providers
    assert provided in all_providers
    assert method in all_providers
def test_traverse_overridden():
    """The overriding provider appears in the traversal as well."""
    origin = providers.Provider()
    provided_attr = origin.provided
    method_ref = provided_attr.method
    override = providers.Provider()

    caller = method_ref.call()
    caller.override(override)

    found = list(caller.traverse())

    assert len(found) == 4
    for expected in (origin, override, provided_attr, method_ref):
        assert expected in found
|
thomasdouenne/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/examples/transports/regress/regress_determinants_ticpe.py | Python | agpl-3.0 | 4,104 | 0.009024 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 22 09:47:41 2015
@author: thomas.douenne
"""
from __future__ import division
import statsmodels.formula.api as smf
from openfisca_france_indirect_taxation.examples.utils_example import simulate_df_calee_by_grosposte
if __name__ == '__main__':
    import logging
    log = logging.getLogger(__name__)
    import sys
    logging.basicConfig(level = logging.INFO, stream = sys.stdout)
    # Household-level variables pulled from the indirect-taxation micro-simulation:
    # weights (pondmen), incomes, fuel expenditures, urbanisation stratum,
    # household composition, employment situations and living-standard decile.
    simulated_variables = [
        'pondmen',
        'revtot',
        'rev_disp_loyerimput',
        'depenses_carburants',
        'depenses_essence',
        'depenses_diesel',
        'strate',
        'nenfants',
        'nadultes',
        'situacj',
        'situapr',
        'niveau_vie_decile'
        ]
    for year in [2005]:
        data_for_reg = simulate_df_calee_by_grosposte(simulated_variables = simulated_variables, year = year)
        # In 2005 3 people consume fuel while their rev_disp_loyerimput is 0. Creates inf number in part_carburants
        data_for_reg = data_for_reg[data_for_reg['rev_disp_loyerimput'] > 0]
        data_for_reg['rev_disp_loyerimput_2'] = data_for_reg['rev_disp_loyerimput'] ** 2
        # Budget shares: fuel spending divided by disposable income (with imputed rent).
        data_for_reg['part_carburants'] = data_for_reg['depenses_carburants'] / data_for_reg['rev_disp_loyerimput']
        data_for_reg['part_diesel'] = data_for_reg['depenses_diesel'] / data_for_reg['rev_disp_loyerimput']
        data_for_reg['part_essence'] = data_for_reg['depenses_essence'] / data_for_reg['rev_disp_loyerimput']
        # One-hot encode the urbanisation stratum ('strate', coded 0-4).
        data_for_reg['rural'] = 0
        data_for_reg['petite_villes'] = 0
        data_for_reg['villes_moyennes'] = 0
        data_for_reg['grandes_villes'] = 0
        data_for_reg['agglo_paris'] = 0
        data_for_reg.loc[data_for_reg['strate'] == 0, 'rural'] = 1
        data_for_reg.loc[data_for_reg['strate'] == 1, 'petite_villes'] = 1
        data_for_reg.loc[data_for_reg['strate'] == 2, 'villes_moyennes'] = 1
        data_for_reg.loc[data_for_reg['strate'] == 3, 'grandes_villes'] = 1
        data_for_reg.loc[data_for_reg['strate'] == 4, 'agglo_paris'] = 1
        # One-hot encode the living-standard deciles (decile_10 will serve as reference).
        deciles = ['decile_1', 'decile_2', 'decile_3', 'decile_4', 'decile_5', 'decile_6', 'decile_7', 'decile_8',
            'decile_9', 'decile_10']
        for decile in deciles:
            data_for_reg[decile] = 0
            number = decile.replace('decile_', '')
            data_for_reg.loc[data_for_reg['niveau_vie_decile'] == int(number), decile] = 1
        # Employment status (translated from French):
        # "working": employed, intern, student (situacj/situapr < 4, except code 0)
        # "other": unemployed, retired, homemakers, others
        data_for_reg['cj_travaille'] = 0
        data_for_reg['pr_travaille'] = 0
        data_for_reg.loc[data_for_reg['situacj'] < 4, 'cj_travaille'] = 1
        data_for_reg.loc[data_for_reg['situacj'] == 0, 'cj_travaille'] = 0
        data_for_reg.loc[data_for_reg['situapr'] < 4, 'pr_travaille'] = 1
        # Number of working adults (0, 1 or 2).
        data_for_reg['travaille'] = data_for_reg['cj_travaille'] + data_for_reg['pr_travaille']
        # OLS of each fuel budget share on deciles, urbanisation and household
        # structure; decile_10 and villes_moyennes are the omitted reference
        # categories in the formulas below.
        regression_carburants = smf.ols(formula = 'part_carburants ~ \
            decile_1 + decile_2 + decile_3 + decile_4 + decile_5 + decile_6 + decile_7 + decile_8 + decile_9 + \
            rural + petite_villes + grandes_villes + agglo_paris + \
            nenfants + nadultes + travaille',
            data = data_for_reg).fit()
        print regression_carburants.summary()
        regression_diesel = smf.ols(formula = 'part_diesel ~ \
            decile_1 + decile_2 + decile_3 + decile_4 + decile_5 + decile_6 + decile_7 + decile_8 + decile_9 + \
            rural + petite_villes + grandes_villes + agglo_paris + \
            nenfants + nadultes + travaille',
            data = data_for_reg).fit()
        print regression_diesel.summary()
        regression_essence = smf.ols(formula = 'part_essence ~ \
            decile_1 + decile_2 + decile_3 + decile_4 + decile_5 + decile_6 + decile_7 + decile_8 + decile_9 + \
            rural + petite_villes + grandes_villes + agglo_paris + \
            nenfants + nadultes + travaille',
            data = data_for_reg).fit()
        print regression_essence.summary()
        # It is tempting to add a variable 'vehicule'. However, I think it is a case of bad control. It captures part
        # of the effect we actually want to estimate.
|
pybursa/homeworks | e_tverdokhleboff/hw6/hw6_starter.py | Python | gpl-2.0 | 540 | 0.002304 | #!/usr/bin/env python
# | -*- coding: utf-8 -*-
u"""
Main homework runner script.
This script launches the solution for homework assignment #6.
"""
__author__ = "Elena Sharovar"
__date__ = "2014-11-23"
from hw6_solution1 import modifier
def runner():
    u"""Run all homework tasks: rewrite data.csv via the hw6 modifier."""
    print "Modifying file..."
    modifier("data.csv")
    print "Modified successfully!"
if __name__ == '__main__':
    # Entry point when executed as a script.
    runner()
|
Groovy-Dragon/tcRIP | ST_pTuple.py | Python | mit | 2,015 | 0.016377 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 4 10:56:51 2017
@author: lewismoffat
This script is focused on statistics, it calculates the most common pTuples
without clipping and with clipping
"""
#==============================================================================
# Module Imports
#==============================================================================
import numpy as np
import matplotlib.pyplot as plt
import dataProcessing as dp
import pdb
import seaborn as sns
from collections import defaultdict
from collections import Counter
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from matplotlib import pylab
#=================================================================== | ===========
# Get the data
#==============================================================================
# are we doing the full set (one patient) or pooling every patient?
singlePatient=False
# which patient to get data from
patient=['Complete']
chain = "beta"
if singlePatient:
    print('Patient: '+patient[0])
    delim = ["naive",chain]+patient #other delimiters
else:
    print('Patient: All')
    delim = ["naive",chain] #other delimiters
seqs, vj = dp.loadAllPatients(delim) # loads all the sequences and V/J gene calls
#==============================================================================
# Clipping the data
#==============================================================================
# filter out sequences shared between the CD4 and CD8 subsets
seqs[0], seqs[1], vj[0], vj[1], joint = dp.removeDup(seqs[0], seqs[1], vj[0], vj[1])
# keep only sequences exactly 14 residues long; together still ~200k seqs
seqs[0]=dp.filtr(seqs[0], 14)
seqs[1]=dp.filtr(seqs[1], 14)
# clip each sequence to the central region (positions 4-9, in place)
for idx, group in enumerate(seqs):
    for idx2, seq in enumerate(group):
        group[idx2]=seq[4:10]
# expand the clipped 6-mers into overlapping 4-tuples; lists are already flat
seqs[0]=dp.expandTuples(seqs[0],n=4)
seqs[1]=dp.expandTuples(seqs[1],n=4)
# count tuple frequencies per subset (c4 = CD4, c8 = CD8)
c4=Counter(seqs[0])
c8=Counter(seqs[1])
print(c4.most_common(n=10))
print()
print(c8.most_common(n=10))
|
xkmato/tracpro | tracpro/polls/migrations/0012_response_status.py | Python | bsd-3-clause | 825 | 0.001212 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def populate_status(apps, schema_editor):
    """Backfill Response.status from the legacy is_complete flag."""
    Response = apps.get_model("polls", "Response")
    for response in Response.objects.all():
        if response.is_complete:
            response.status = 'C'
        else:
            response.status = 'E'
        response.save(update_fields=('status',))
class Migration(migrations.Migration):
    # NOTE(review): populate_status is defined above but is not wired into
    # `operations` via migrations.RunPython — confirm that is intentional.
    # Must run after 0011 so migration ordering stays stable.
    dependencies = [
        ('polls', '0011_issue_regions'),
    ]
    operations = [
        migrations.AddField(
            model_name='response',
            name='status',
            field=models.CharField(default='C', help_text='Current status of this response', max_length=1, verbose_name='Status', choices=[('E', 'Empty'), ('P', 'Partial'), ('C', 'Complete')]),
            preserve_default=False,
        ),
    ]
|
NaturalSolutions/NsPortal | Back/ns_portal/utils/utils.py | Python | mit | 380 | 0 | from pyramid.security import (
_get_authentication_po | licy
)
def my | _get_authentication_policy(request):
# CRITICAL
# _get_authentication_policy(request)
# this method will return the instanciate singleton object that handle
# policy in pyramid app
# the policy object store keys from conf for generate token
return _get_authentication_policy(request)
|
googleapis/python-translate | samples/snippets/hybrid_glossaries/hybrid_tutorial_test.py | Python | apache-2.0 | 3,065 | 0.000326 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import uuid
from hybrid_tutorial import create_glossary
from hybrid_tutorial import pic_to_text
from hybrid_tutorial import text_to_speech
from hybrid_tutorial import translate_text
PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"]
# VISION TESTS
def test_vision_standard_format():
    """OCR on the standard-format sample image must yield some text."""
    extracted = pic_to_text('resources/standard_format.jpeg')
    assert len(extracted) > 0
# TRANSLATE TESTS
def test_create_and_delete_glossary():
    """Round-trip a uniquely named glossary: create it, then delete it."""
    # beta_snippets lives one directory up; extend the import path to reach it.
    sys.path.insert(1, "../")
    from beta_snippets import delete_glossary
    languages = ["fr", "en"]
    # Unique name per run so parallel/repeated runs do not collide.
    glossary_name = f"test-glossary-{uuid.uuid4()}"
    glossary_uri = "gs://cloud-samples-data/translation/bistro_glossary.csv"
    # create_glossary will raise an exception if creation fails
    create_glossary(languages, PROJECT_ID, glossary_name, glossary_uri)
    # Delete glossary so that future tests will pass
    # delete_glossary will raise an exception if deletion fails
    delete_glossary(PROJECT_ID, glossary_name)
def test_translate_standard():
    """A plain (non-glossary) word is translated French -> English."""
    expected_text = "Hello"
    # Attempt to create the glossary; fails if it already exists.
    glossary_name = "bistro-glossary"
    glossary_uri = f"gs://cloud-samples-data/translation/{glossary_name}.csv"
    languages = ["fr", "en"]
    create_glossary(languages, PROJECT_ID, glossary_name, glossary_uri)
    translated = translate_text("Bonjour", "fr", "en", PROJECT_ID, "bistro-glossary")
    assert translated == expected_text
def test_translate_glossary():
    """A glossary term ('chevre') is translated with the glossary applied."""
    expected_text = "I eat goat cheese"
    input_text = "Je mange du chevre"
    # Attempt to create the glossary; fails if it already exists.
    glossary_name = "bistro-glossary"
    glossary_uri = f"gs://cloud-samples-data/translation/{glossary_name}.csv"
    languages = ["fr", "en"]
    create_glossary(languages, PROJECT_ID, glossary_name, glossary_uri)
    translated = translate_text(input_text, "fr", "en", PROJECT_ID, "bistro-glossary")
    assert translated == expected_text
# TEXT-TO-SPEECH TESTS
def test_tts_standard(capsys):
    """text_to_speech writes an MP3 file and reports the output path on stdout."""
    outfile = "resources/test_standard_text.mp3"
    textfile = "resources/standard_format.txt"
    with open(textfile, "r") as f:
        text = f.read()
    text_to_speech(text, outfile)
    # Assert audio file generated
    assert os.path.isfile(outfile)
    out, err = capsys.readouterr()
    # Assert success message printed
    assert "Audio content written to file " + outfile in out
    # Delete test file so reruns start clean
    os.remove(outfile)
|
qingpingguo/git-repo | subcmds/list.py | Python | apache-2.0 | 2,535 | 0.007101 | #
# Copyright (C) 2011 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from command import Command, MirrorSafeCommand
class List(Command, MirrorSafeCommand):
  common = True
  helpSummary = "List projects and their associated directories"
  helpUsage = """
%prog [-f] [<project>...]
%prog [-f] -r str1 [str2]..."
"""
  helpDescription = """
List all projects; pass '.' to list the project for the cwd.
This is similar to running: repo forall -c 'echo "$REPO_PATH : $REPO_PROJECT"'.
"""
  def _Options(self, p, show_smart=True):
    # Register the command-line flags for this subcommand.
    p.add_option('-r', '--regex',
                 dest='regex', action='store_true',
                 help="Filter the project list based on regex or wildcard matching of strings")
    p.add_option('-f', '--fullpath',
                 dest='fullpath', action='store_true',
                 help="Display the full work tree path instead of the relative path")
  def Execute(self, opt, args):
    """List all projects and the associated directories.
This may be possible to do with 'repo forall', but repo newbies have
trouble figuring that out. The idea here is that it should be more
discoverable.
Args:
      opt: The options.
      args: Positional args. Can be a list of projects to list, or empty.
    """
    if not opt.regex:
      projects = self.GetProjects(args)
    else:
      projects = self.FindProjects(args)
    def _getpath(x):
      # -f/--fullpath switches between absolute worktree and relative path.
      if opt.fullpath:
        return x.worktree
      return x.relpath
    lines = []
    for project in projects:
      lines.append("%s : %s" % (_getpath(project), project.name))
    lines.sort()
    print '\n'.join(lines)
  def FindProjects(self, args):
    """Return projects whose name or path matches any of the given patterns.

    Each arg is treated as a case-insensitive regular expression; re.compile
    caches patterns internally, so recompiling per project is not a real cost.
    """
    result = []
    for project in self.GetProjects(''):
      for arg in args:
        pattern = re.compile(r'%s' % arg, re.IGNORECASE)
        if pattern.search(project.name) or pattern.search(project.relpath):
          result.append(project)
          break
    result.sort(key=lambda project: project.relpath)
    return result
|
iamweilee/pylearn | builtin-callable-example-1.py | Python | mit | 767 | 0.018253 | '''
The ``callable`` function checks whether an object can be called (either
directly or via ``apply``). It returns True for functions, methods, lambda
expressions, classes, and instances of classes that implement ``__call__``.
'''
def dump(function):
    # Report on stdout whether *function* is callable (Python 2 print syntax).
    if callable(function):
        print function, "is callable"
    else:
        print function, "is *not* callable"
class A:
    # Plain class: its instances are NOT callable (no __call__ defined).
    def method(self, value):
        return value
class B(A):
    # Defining __call__ makes instances of B callable.
    def __call__(self, value):
        return value
a = A()
b = B()
dump(0) # simple objects: not callable
dump("string")
dump(callable)
dump(dump) # function: callable
dump(A) # classes: callable (calling them builds instances)
dump(B)
dump(B.method)
dump(a) # instances: a is NOT callable (A has no __call__)
dump(b) # b IS callable (B defines __call__)
dump(b.method)
'''
Note that the class objects (A and B) are themselves callable; calling them
produces new objects (class instances). An instance of A is not callable,
however, because its class does not implement the __call__ method.
''' |
plotly/python-api | packages/python/plotly/plotly/validators/splom/marker/line/_widthsrc.py | Python | mit | 465 | 0 | import _plotly_utils.basevalidators
class WidthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``splom.marker.line.widthsrc`` plotly property.

    Appears auto-generated boilerplate; prefer changing the code generator
    over hand-editing this file.
    """
    def __init__(
        self, plotly_name="widthsrc", parent_name="splom.marker.line", **kwargs
    ):
        super(WidthsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Defaults below apply unless the caller overrides them via kwargs.
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
PyCQA/pylint | tests/functional/a/await_outside_async.py | Python | gpl-2.0 | 579 | 0.006908 | # pylint: disable=missing-docstring,unused-variable
import asyncio
async def nested():
    return 42  # coroutine awaited by the fixture cases below
async def main():
    nested()
    print(await nested()) # This is okay: we are inside an async def
def not_async():  # sync function: awaiting here is the error under test
    print(await nested()) # [await-outside-async]
async def func(i):
    return i**2  # helper coroutine for the comprehension case below
async def okay_function():
    var = [await func(i) for i in range(5)] # This should be okay: comprehension inside async def
# Test nested functions
async def func2():
    def inner_func():  # sync def nested in a coroutine: await is invalid here
        await asyncio.sleep(1) # [await-outside-async]
def outer_func():
    async def inner_func():  # async def nested in a sync function: await is fine
        await asyncio.sleep(1)
|
subhacom/moose-core | tests/python/test_vec.py | Python | gpl-3.0 | 92 | 0 | import moose
foo = moose.Pool('/foo1 | ', 500)
bar = moose.vec('/foo1')
assert len(bar) == | 500
|
sametmax/Django--an-app-at-a-time | ignore_this_directory/django/http/multipartparser.py | Python | mit | 24,849 | 0.001207 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
import base64
import binascii
import cgi
from urllib.parse import unquote
from django.conf import settings
from django.core.exceptions import (
RequestDataTooBig, SuspiciousMultipartForm, TooManyFieldsSent,
)
from django.core.files.uploadhandler import (
SkipFile, StopFutureHandlers, StopUpload,
)
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils.text import unescape_entities
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
    """Raised when the multipart request body cannot be parsed."""
class InputStreamExhausted(Exception):
    """No more reads are allowed from this device."""
# Markers used to tag each part of the multipart body during parsing.
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser:
"""
A rfc2388 multipart/form-data parser.
``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handlers:
A list of UploadHandler instances that perform operations on the
uploaded data.
:encoding:
The encoding with which to treat the incoming data.
"""
# Content-Type should contain multipart and the boundary information.
content_type = META.get('CONTENT_TYPE', '')
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type.encode('ascii'))
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary.decode())
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('CONTENT_LENGTH', 0))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
if isinstance(boundary, str):
boundary = boundary.encode('ascii')
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2 ** 31 - 4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Return a tuple containing the POST and FILES dictionary, respectively.
"""
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# HTTP spec says that Content-Length >= 0 is valid
# handling content-length == 0 before continuing
if self._content_length == 0:
return QueryDict(encoding=self._encoding), MultiValueDict()
# See if any of the handlers take care of the parsing.
# This allows overriding everything if need be.
for handler in handlers:
result = handler.handle_raw_input(
self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding,
)
# Check to see if it was handled
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict(mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
# Number of bytes that have been read.
num_bytes_read = 0
# To count the number of keys in the request.
num_post_keys = 0
# To limit the amount of data read from the request.
read_size = None
try:
for item_type, meta_data, field_stream in Parser(stream, self._b | oundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
| old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_text(field_name, encoding, errors='replace')
if item_type == FIELD:
# Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
num_post_keys += 1
if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None and
settings.DATA_UPLOAD_MAX_NUMBER_FIELDS < num_post_keys):
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
# Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read(size=read_size)
num_bytes_read += len(raw_data)
try:
data = base64.b64decode(raw_data)
except binascii.Error:
data = raw_data
else:
data = field_stream.read(size=read_size)
num_bytes_read += len(data)
# Add two here to make the check consistent with the
# x-www-form-urlencoded check that includes '&='.
num_bytes_read += len(field_name) + 2
if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
num_bytes_read > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')
self._post.appendlist(field_name, force_text(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if file_name:
|
brady-vitrano/full-stack-django-kit | fabfile/docs.py | Python | mit | 877 | 0.002281 | from fabric.api import task, local, run
from fabric.context_managers import lcd
import settings
@task(default=True)
def build():
    """
    (Default) Build Sphinx HTML documentation
    """
    # Runs "make html" from the docs/ directory on the local machine.
    with lcd('docs'):
        local('make html')
@task()
def deploy():
    """
    Upload docs to server
    """
    # Rebuild the HTML docs first, then rsync them to the target host.
    build()
    destination = '/usr/share/nginx/localhost/mysite/docs/build/html'
    # NOTE(review): environments other than 'vagrant'/'ci' silently do
    # nothing — confirm that is intentional.
    if settings.environment == 'vagrant':
        local("rsync -avz --rsync-path='sudo rsync' -e 'ssh -p 2222 -i .vagrant/machines/web/virtualbox/private_key -o StrictHostKeyChecking=no' docs/build/html/ %s@%s:%s " % ('vagrant', 'localhost', destination))
    elif settings.environment == 'ci':
        local("rsync -avz --rsync-path='sudo rsync' -e 'ssh -p 2222 -i /var/go/id_rsa_web -o StrictHostKeyChecking=no' docs/build/html/ %s@%s:%s " % ('vagrant', '192.168.10.10', destination))
|
benhoff/reddit_helper | reddit_helper/github.py | Python | gpl-3.0 | 1,071 | 0.002801 | import requests
import datetime
def get_most_recent_commits(github_name, hours_to_go_back=8):
    """Return the most recent pushed commit per repository for a GitHub user.

    Queries the user's public events feed and keeps, for every repository,
    the commit from the newest PushEvent that happened within the last
    ``hours_to_go_back`` hours.

    Args:
        github_name: GitHub user login to inspect.
        hours_to_go_back: look-back window in hours (default 8).

    Returns:
        dict mapping repository full name -> commit payload dict.
    """
    url = 'https://api.github.com/users/{}/events'.format(github_name)
    events = requests.get(url).json()
    push_events = [event for event in events if event['type'] == u'PushEvent']
    most_recent_commits = {}
    current_time = datetime.datetime.utcnow()
    cutoff_delta = datetime.timedelta(hours=hours_to_go_back)
    for push_event in push_events:
        push_time = datetime.datetime.strptime(push_event['created_at'],
                                               '%Y-%m-%dT%H:%M:%SZ')
        if current_time - push_time >= cutoff_delta:
            continue  # older than the look-back window
        repo_name = push_event['repo']['name']
        # The feed is newest-first, so the first event seen for a repo is its
        # latest push.  Skip repeats instead of aborting: the original code
        # used ``break`` here, which dropped every remaining repo once any
        # repo appeared a second time.
        if repo_name in most_recent_commits:
            continue
        # NOTE(review): GitHub push payloads list commits oldest-first, so
        # [0] is the earliest commit of that push — confirm [-1] was not
        # intended before changing it.
        most_recent_commits[repo_name] = push_event['payload']['commits'][0]
    return most_recent_commits
|
nyarasha/firemix | patterns/radial_gradient.py | Python | gpl-3.0 | 7,317 | 0.003553 | # This file is part of Firemix.
#
# Copyright 2013-2016 Jonathan Evans <jon@craftyjon.com>
#
# Firemix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Firemix | is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# ME | RCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Firemix. If not, see <http://www.gnu.org/licenses/>.
import colorsys
import random
import math
import numpy as np
import ast
from lib.pattern import Pattern
from lib.colors import clip
from lib.parameters import FloatParameter, StringParameter
from lib.color_fade import ColorFade
class RadialGradient(Pattern):
    """Radial gradient that responds to onsets"""
    # Resolution of the precomputed luminance / color-fade lookup tables.
    _luminance_steps = 256
    _fader_steps = 256
    def setup(self):
        """Register tweakable parameters and precompute per-pixel geometry."""
        self.add_parameter(FloatParameter('speed', 0.1))
        self.add_parameter(FloatParameter('hue-width', 1.5))
        self.add_parameter(FloatParameter('hue-step', 0.1))
        self.add_parameter(FloatParameter('wave1-amplitude', 0.5))
        self.add_parameter(FloatParameter('wave1-period', 1.5))
        self.add_parameter(FloatParameter('wave1-speed', 0.05))
        self.add_parameter(FloatParameter('wave2-amplitude', 0.5))
        self.add_parameter(FloatParameter('wave2-period', 1.5))
        self.add_parameter(FloatParameter('wave2-speed', 0.1))
        self.add_parameter(FloatParameter('rwave-amplitude', 0.5))
        self.add_parameter(FloatParameter('rwave-standing', 0.0))
        self.add_parameter(FloatParameter('rwave-period', 1.5))
        self.add_parameter(FloatParameter('rwave-speed', 0.1))
        self.add_parameter(FloatParameter('radius-scale', 1.0))
        self.add_parameter(FloatParameter('audio-radius-scale', 0.0))
        self.add_parameter(FloatParameter('audio-amplitude', 0.0))
        self.add_parameter(FloatParameter('audio-boost', 0.0))
        self.add_parameter(FloatParameter('audio-brightness', 0.0))
        self.add_parameter(FloatParameter('audio-scale', 0.0))
        self.add_parameter(FloatParameter('audio-use-fader', 0.0))
        self.add_parameter(FloatParameter('audio-energy-lum-time', 0.0))
        self.add_parameter(FloatParameter('audio-energy-lum-strength', 0.0))
        self.add_parameter(FloatParameter('audio-fader-percent', 1.0))
        self.add_parameter(FloatParameter('luminance-speed', 0.01))
        self.add_parameter(FloatParameter('luminance-scale', 1.0))
        self.add_parameter(StringParameter('color-gradient', "[(0,0,1), (0,1,1)]"))
        # Phase accumulators for the animated waves; wave1 starts at a fixed
        # phase, the others are randomized per run.
        self.hue_inner = random.random()
        self.wave1_offset = 0 # was: random.random()
        self.wave2_offset = random.random()
        self.rwave_offset = random.random()
        self.luminance_offset = random.random()
        # Polar coordinates of every pixel relative to the scene center,
        # with distances normalized to [0, 1].
        cx, cy = self.scene().center_point()
        self.locations = self.scene().get_all_pixel_locations()
        x,y = self.locations.T
        x -= cx
        y -= cy
        self.pixel_distances = np.sqrt(np.square(x) + np.square(y))
        self.pixel_angles = math.pi + np.arctan2(y, x)
        self.pixel_distances /= max(self.pixel_distances)
        super(RadialGradient, self).setup()
    def parameter_changed(self, parameter):
        """Rebuild the color-fade lookup whenever any parameter changes."""
        fade_colors = ast.literal_eval(self.parameter('color-gradient').get())
        self._fader = ColorFade(fade_colors, self._fader_steps)
    def reset(self):
        # Nothing to reset: all state evolves continuously in draw().
        pass
    def draw(self, dt):
        """Render one frame; dt is the time step (audio-boosted below)."""
        # An onset jumps the base hue and the luminance phase by one hue step.
        if self._mixer.is_onset():
            self.hue_inner = math.fmod(self.hue_inner + self.parameter('hue-step').get(), 1.0)
            self.luminance_offset += self.parameter('hue-step').get()
        # Speed everything up with the low-frequency audio energy.
        dt *= 1.0 + self.parameter('audio-boost').get() * self._mixer.audio.getLowFrequency()
        self.hue_inner += dt * self.parameter('speed').get()
        self.wave1_offset += self.parameter('wave1-speed').get() * dt
        self.wave2_offset += self.parameter('wave2-speed').get() * dt
        self.rwave_offset += self.parameter('rwave-speed').get() * dt
        self.luminance_offset += self.parameter('luminance-speed').get() * dt
        # NOTE(review): `smoothEnergy` is accessed as an attribute here but
        # `getSmoothEnergy()` is called as a method below — confirm both are
        # valid on the mixer's audio object.
        luminance_scale = self.parameter('luminance-scale').get() + self._mixer.audio.smoothEnergy * self.parameter('audio-scale').get()
        # Radial wave perturbs the pixel angles: standing or travelling form.
        if self.parameter('rwave-standing').get():
            rwave = np.sin(self.pixel_distances * self.parameter('rwave-period').get()) * self.parameter('rwave-standing').get() * np.sin(self.rwave_offset)
            rwave += np.sin(self.rwave_offset + np.pi * 0.75) * self.parameter('rwave-standing').get()
        else:
            rwave = np.abs(np.sin(self.rwave_offset + self.pixel_distances * self.parameter('rwave-period').get()) * self.parameter('rwave-amplitude').get())
        pixel_angles = self.pixel_angles + rwave
        # Two angular waves plus the (audio-scaled) radius produce the hue field.
        wave1 = np.abs(np.cos(self.wave1_offset + pixel_angles * self.parameter('wave1-period').get()) * self.parameter('wave1-amplitude').get())
        wave2 = np.abs(np.cos(self.wave2_offset + pixel_angles * self.parameter('wave2-period').get()) * self.parameter('wave2-amplitude').get())
        hues = self.pixel_distances * (self.parameter('radius-scale').get() + self._mixer.audio.getSmoothEnergy() * self.parameter('audio-radius-scale').get()) + wave1 + wave2
        audio_amplitude = self.parameter('audio-amplitude').get()
        fft = self._mixer.audio.getSmoothedFFT()
        if len(fft) > 0 and audio_amplitude:
            # Map each pixel's angle onto an FFT bin so the spectrum wraps
            # around the circle and modulates the hue field.
            audio_pixel_angles = np.mod(pixel_angles / (math.pi * 2) + 1, 1)
            fft_size = len(fft)
            bin_per_pixel = np.int_(audio_pixel_angles * fft_size)
            wave_audio = audio_amplitude * np.asarray(fft)[bin_per_pixel]
            hues += wave_audio
        if self.parameter('audio-energy-lum-strength').get():
            lums = np.mod(np.int_(hues * self.parameter('audio-energy-lum-time').get()), self._luminance_steps)
            lums = self._mixer.audio.fader.color_cache.T[0][lums] * self.parameter('audio-energy-lum-strength').get()
        else:
            lums = hues
        # Quantize into the precomputed fade table to fetch luminance/saturation.
        luminance_indices = np.mod(np.abs(np.int_((self.luminance_offset + lums * luminance_scale) * self._luminance_steps)), self._luminance_steps)
        LS = self._fader.color_cache[luminance_indices].T
        luminances = LS[1]
        luminances += self._mixer.audio.getEnergy() * self.parameter('audio-brightness').get()
        hues = np.fmod(self.hue_inner + hues * self.parameter('hue-width').get(), 1.0)
        if self.parameter('audio-use-fader').get():
            #luminances *= self._mixer.audio.fader.color_cache.T[1][np.int_(luminance_indices * self.parameter('audio-fader-percent').get())] * self.parameter('audio-use-fader').get()
            #hues += self._mixer.audio.fader.color_cache.T[0][np.int_(hues * 255 * self.parameter('audio-fader-percent').get())] * self.parameter('audio-use-fader').get()
            hues += self._mixer.audio.fader.color_cache.T[0][np.int_(luminance_indices * self.parameter('audio-fader-percent').get())] * self.parameter('audio-use-fader').get()
        self.setAllHLS(hues, luminances, LS[2])
|
igoroya/igor-oya-solutions-cracking-coding-interview | crackingcointsolutions/chapter2/exerciseeight.py | Python | mit | 2,720 | 0.003309 | '''
Created on 23 Aug 2017
Loop detection: Given a circular linked list, implement an algorithm
that returns the beginning of the loop
DEFINITION
Circular linked list: A (corrupt) linked list in which a node's next pointer points
to another as to make a loop in the linked list.
EXAMPLE:
Input A -> B -> C -> D -> E -> C [the same as C earlier]
Output: C
@author: igoroya
'''
from chapter2 import utils
# Idea: Store in a set a tuple of (node, next_node)
# Run list and check if new tuple is already in list
de | f is_circular(my_list):
my_set = set()
node = my_list.head_node
count = 0
while node.next_node is not None:
if (node.cargo, node.next_node.cargo) in my_set:
# indication that may be same, make that "node"
# is referenced before by runnign again
if is_node_in_list_num_nodes(node.next_node, m | y_list, count):
return True, node
my_set.add((node.cargo, node.next_node.cargo))
node = node.next_node
count += 1
return False, None
def is_node_in_list_num_nodes(my_node, my_list, n):
    """Return True when *my_node* (compared by identity) occurs among the
    first ``n + 1`` nodes of *my_list*, False otherwise.
    """
    position = 0
    current = my_list.head_node
    while current is not None:
        if current is my_node:
            return True
        if position == n:
            return False
        current = current.next_node
        position += 1
    return False
if __name__ == '__main__':
    # a well behaving list: a -> e -> i -> o -> here
    str1 = "here"
    list1 = utils.SinglyLinkedList()
    list1.append("a")
    list1.append("e")
    list1.append("i")
    list1.append("o")
    list1.append(str1)
    print("Is circular?: {}, at {}".format(*is_circular(list1)))
    # corrupted list now: last node points back at the 'e' node
    list1.head_node.next_node.next_node.next_node.next_node = list1.head_node.next_node
    print("Is circular?: {}, at {}".format(*is_circular(list1)))
    # this is not corrupted (same cargo sequence repeated, but no loop)
    str1 = "here"
    list1 = utils.SinglyLinkedList()
    list1.append("a")
    list1.append("e")
    list1.append("i")
    list1.append("o")
    list1.append(str1)
    list1.append("a")
    list1.append("e")
    list1.append("i")
    list1.append("o")
    list1.append(str1)
    print("Is circular?: {}, at {}".format(*is_circular(list1)))
    # this is corrupted: node 6 wired back to node 2, despite duplicate cargos
    str1 = "here"
    list1 = utils.SinglyLinkedList()
    list1.append("a")
    list1.append("e")
    list1.append("i")
    list1.append("o")
    list1.append(str1)
    list1.append("a")
    list1.append("e")
    list1.append("i")
    list1.append("o")
    list1.append(str1)
    list1.head_node.next_node.next_node.next_node.next_node.next_node.next_node = list1.head_node.next_node.next_node
    print("Is circular?: {}, at {}".format(*is_circular(list1)))
|
ega1979/ros_book_programs | hello_world.py | Python | bsd-2-clause | 86 | 0 | i | mport rospy
rospy.init_node('hello_world')  # register this process as a ROS node
rospy.loginfo('Hello World')    # emit one line through the ROS logging system
rospy.spin()                    # block until the node is shut down
hylom/grrreader | backend/feedfetcher.py | Python | gpl-2.0 | 889 | 0.00225 | #!/usr/bin/python
"feed fetcher"
from db import MySQLDatabase
from fetcher import FeedFetcher
def main():
| db = MySQLDatabase()
fetcher = FeedFetcher()
feeds = db.get_feeds(offset=0, limit=10)
read_count = 10
while len(feeds) > 0:
for feed in feeds:
| fid = feed[0]
url = feed[1]
title = feed[2]
print "fetching #{0}: {1}".format(fid, url)
entries = fetcher.fetch(url)
for entry in entries:
entry.feed_id = fid
try:
print "insert {0}".format(entry.url)
except UnicodeEncodeError:
print "insert {0}".format(entry.url.encode('utf-8'))
db.append_feed_content(entry)
feeds = db.get_feeds(offset=read_count, limit=10)
read_count += 10
if __name__ == '__main__':
main()
|
bongo-project/bongo | src/apps/storetool/bongo/storetool/CalendarCommands.py | Python | gpl-2.0 | 14,791 | 0.003786 | import bongo.external.simplejson as simplejson
import bongo.external.vobject as vobject
import logging
import os
import re
import time
import random
import md5
import email
from email.MIMEText import MIMEText
from email.MIMEMessage import MIMEMessage
from email.MIMEMultipart import MIMEMultipart
from email.Message import Message
import bongo.table as table
from bongo.cmdparse import Command
from bongo.Contact import Contact
from bongo.BongoError import BongoError
from libbongo.libs import bongojson, msgapi
from bongo.store.StoreClient import DocTypes, StoreClient, CalendarACL
from bongo.store.QueueClient import QueueClient
class CalendarsCommand(Command):
log = logging.getLogger("Bongo.StoreTool")
def __init__(self):
Command.__init__(self, "calendar-list", aliases=["cl"],
summary="List the calendars in your store")
def Run(self, options, args):
store = StoreClient(options.user, options.store)
cols = ["subd?", "Name", "Url"]
rows = []
try:
cals = list(store.List("/calendars", props=["bongo.calendar.url"]))
for cal in cals:
subd = cal.props.has_key("bongo.calendar.url") and "Yes" or None
rows.append([subd, cal.filename,
cal.props.get("bongo.calendar.url")])
finally:
store.Quit()
print table.format_table(cols, rows)
class CalendarEventsCommand(Command):
log = logging.getLogger("Bongo.StoreTool")
def __init__(self):
Command.__init__(self, "calendar-events", aliases=["ce"],
summary="List the events in a calendar",
usage="%prog %cmd <calendar>")
def _FindCalendar(self, store, name):
cals = list(store.List("/calendars"))
for cal in cals:
if cal.filename == name:
return cal
def Run(self, options, args):
store = StoreClient(options.user, options.store)
cols = ["Summary", "Start", "End"]
rows = []
try:
cal = self._FindCalendar(store, args[0])
if cal is None:
print "Could not find calendar named '%s'" % args[0]
return
events = list(store.Events(cal.uid, ["nmap.document",
"nmap.event.calendars"]))
for event in events:
jsob = simplejson.loads(event.props["nmap.document"].strip())
comp = jsob["components"][0]
summary = comp.get("summary")
if summary is not None:
summary = summary.get("value")
start = comp.get("start")
if start is not None:
start = start.get("value")
end = comp.get("end")
if end is not None:
end = end.get("value")
rows.append((summary, start, end))
finally:
store.Quit()
rows.sort()
print table.format_table(cols, rows)
class EventsDeleteCommand(Command):
log = logging.getLogger("Bongo.StoreTool")
def __init__(self):
Command.__init__(self, "events-delete", aliases=["ed"],
summary="Delete all events in the store")
def Run(self, options, args):
store = StoreClient(options.user, options.store)
try:
events = list(store.Events())
for event in events:
cals = store.PropGet(event.uid, "nmap.event.calendars")
cals = cals.strip().split("\n")
for cal in cals:
if cal == "":
continue
print "unlinking event", event.uid
store.Unlink(cal, event.uid)
store.Delete(event.uid)
finally:
store.Quit()
class EventsCleanupCommand(Command):
log = logging.getLogger("Bongo.StoreTool")
def __init__(self):
Command.__init__(self, "events-cleanup", aliases=["ec"],
summary="Delete any events not linked with calendars")
def Run(self, options, args):
store = StoreClient(options.user, options.store)
try:
events = list(store.Events())
for event in events:
cals = store.PropGet(event.uid, "nmap.event.calendars")
if cals is None or cals == "":
print "deleting event", event.uid
store.Delete(event.uid)
finally:
store.Quit()
class CalendarDeleteCommand(Command):
log = logging.getLogger("Bongo.StoreTool")
def __init__(self):
Command.__init__(self, "calendar-delete", aliases=["cd"],
summary="Delete specified calendars",
usage="%prog %cmd <calendars>")
def _FindCalendar(self, store, name):
cals = list(store.List("/calendars"))
for cal in cals:
if cal.filename == name:
return cal
def Run(self, options, args):
if len(args) == 0:
self.print_help()
self.exit()
store = StoreClient(options.user, options.store)
try:
for calname in args:
cal = self._FindCalendar(store, calname)
if cal is None:
print "Could not find calendar named '%s'" % calname
continue
if calname.lower() != "personal":
print "deleting calendar", cal.uid
store.Delete(cal.uid)
else:
print "events deleted. not deleting calendar", calname
finally:
store.Quit()
class CalendarSubscribeCommand(Command):
log = logging.getLogger("Bongo.StoreTool")
def __init__(self):
Command.__init__(self, "calendar-sub", aliases=["cs"],
summary="Subscribe to specified calendar",
usage="%prog %cmd <name> <url>")
def Run(self, options, args):
if len(args) < 2:
self.print_usage()
self.exit()
(name, url) = args
uid = msgapi.IcsSubscribe(options.store, name, None, url, None, None)
print "Subscribed: uid is", hex(uid)
class CalendarImportCommand(Command):
log = logging.getLogger("Bongo.StoreTool")
def __init__(self):
Command.__init__(self, "calendar-import", aliases=["ci"],
summary="Import a specified calendar file",
usage="%prog %cmd <name> <file>")
def Run(self, options, args):
if len(args) < 2:
self.print_usage()
self.exit()
(name, file) = args
if re.search("^[^/]+://", file):
url = file
else:
file = os.path.realpath(file)
if not os.access(file, os.R_OK):
self.exit("File doesn't exist or isn't readable: %s" % file)
url = "file://" + file
uid = msgapi.IcsImport(options.store, name, None, url, None, None)
print "Imported: uid is", hex(uid)
class CalendarPublishCommand(Command):
log = logging.getLogger("Bongo.StoreTool")
def __init__(self):
Command.__init__(self, "calendar-publish", aliases=["cp"],
summary="Make a specified calendar public, and send an invitation",
usage="%prog %c | md <name> [address, ...]")
def Run(self, options, args):
if len(args) < 1 :
self.print_usage()
self.exit()
doc = "\"/calendars/%s\"" % (args[0])
addresses = args[1:]
store = StoreClient(options.user, options.store)
try:
acl = CalendarACL(store.GetACL(doc))
acl.SetPublic(CalendarACL.Rights.R | ead)
store.SetACL(doc, acl.GetACL())
finally:
store.Quit()
class CalendarUnpublishCommand(Command):
log = logging.getLogger("Bongo.StoreTool")
def __init__(self):
Command.__init__(self, "calendar-u |
rcarneva/rcarneva.github.io | publishconf.py | Python | mit | 533 | 0.005629 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ i | mport unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
SITEURL = 'http://rcarneva.github.io'
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following | items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
|
slint/zenodo | zenodo/modules/records/httpretty_mock.py | Python | gpl-2.0 | 2,498 | 0.0004 |
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Mock HTTPretty.
HTTPretty fix related to SSL bug:
https://github.com/gabrielfalcao/HTTPretty/issues/242
"""
import httpretty
import httpretty.core
from httpretty | import HTTPretty as OriginalHTTPretty
try:
from requests.packages.urllib3.contrib.pyopenssl import \
inject_into_urllib3, extract_from_urllib3
pyopenssl_override = True
except:
pyopenssl_override = False
class MyHTTPretty(OriginalHTTPretty):
"""
HTTPretty mock.
| pyopenssl monkey-patches the default ssl_wrap_socket() function in the
'requests' library, but this can stop the HTTPretty socket monkey-patching
from working for HTTPS requests.
Our version extends the base HTTPretty enable() and disable()
implementations to undo and redo the pyopenssl monkey-patching,
respectively.
"""
@classmethod
def enable(cls):
"""Enable method mock."""
OriginalHTTPretty.enable()
if pyopenssl_override:
# Take out the pyopenssl version - use the default implementation
extract_from_urllib3()
@classmethod
def disable(cls):
"""Disable method mock."""
OriginalHTTPretty.disable()
if pyopenssl_override:
# Put the pyopenssl version back in place
inject_into_urllib3()
# Substitute in our version
HTTPretty = MyHTTPretty
httpretty.core.httpretty = MyHTTPretty
# May need to set other module-level attributes here, e.g. enable, reset etc,
# depending on your needs
httpretty.httpretty = MyHTTPretty
|
390910131/Misago | misago/markup/bbcode/blocks.py | Python | gpl-2.0 | 223 | 0 | im | port re
from markdown.blockprocessors import HRProcessor
class BBCodeHRProcessor(HRProcessor):
RE = r'^\[hr\]*'
# Detect hr on any line of a block.
SEARCH_RE = re.compile(RE, re.MULTILINE | | re.IGNORECASE)
|
ianmiell/OLD-shutitdist | bison/bison.py | Python | gpl-2.0 | 1,256 | 0.041401 | """ShutIt module. See http://shutit.tk/
"""
from shutit_module import ShutItModule
class bison(ShutItModule):
def is_installed(self, shutit):
return shutit.file_exists('/root/shutit_build/module_record/' + self.module_id + '/built')
def build(self, shutit):
shutit.send('mkdir -p /tmp/build/bison')
shutit.send('cd /tmp/build/bison')
version = shutit.cfg[self.module_id]['version']
shutit.send('curl -L http://ftp.gnu.org/gnu/bison/bison-' + version + '.tar.gz | tar -zxf -')
shutit.send('cd bison*' + version)
shutit.send('./configure --prefix=/usr --with-libiconv-prefix=/usr')
shutit.send('make')
shutit.send('make install')
return True
def get_config(self, shutit):
shutit.get_config(self.module_id, 'version', '3.0')
return True
#def check_ready(self, shutit):
# return True
#def start(self, shutit):
# return True
#def stop(self, shutit):
# return True
def finalize(self, shutit):
#shutit.send('rm -rf
return True
#def remove(self, shutit):
# return True
#def test(self, shutit):
# return True
def module():
return bison(
'shutit.tk.sd.bison.bison', 15 | 8844782.0039,
descri | ption='Bison compilation',
maintainer='ian.miell@gmail.com',
depends=['shutit.tk.sd.pkg_config.pkg_config']
)
|
luxus/home-assistant | homeassistant/components/light/rfxtrx.py | Python | mit | 6,729 | 0 | """
Support for RFXtrx lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.rfxtrx/
"""
import logging
import homeassistant.components.rfxtrx as rfxtrx
from homeassistant.components.light import ATTR_BRIGHTNESS, Light
from homeassistant.components.rfxtrx import (
ATTR_FIREEVENT, ATTR_NAME, ATTR_PACKETID, ATTR_STATE, EVENT_BUTTON_PRESSED)
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.util import slugify
DEPENDENCIES = ['rfxtrx']
SIGNAL_REPETITIONS = 1
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup the RFXtrx platform."""
import RFXtrx as rfxtrxmod
lights = []
signal_repetitions = config.get('signal_repetitions', SIGNAL_REPETITIONS)
for device_id, entity_info in config.get('devices', {}).items():
if device_id in rfxtrx.RFX_DEVICES:
continue
_LOGGER.info("Add %s rfxtrx.light", entity_info[ATTR_NAME])
# Check if i must fire event
fire_event = entity_info.get(ATTR_FIREEVENT, False)
datas = {ATTR_STATE: False, ATTR_FIREEVENT: fire_event}
rfxobject = rfxtrx.get_rfx_object(entity_info[ATTR_PACKETID])
new_light = RfxtrxLight(
entity_info[ATTR_NAME], rfxobject, datas,
signal_repetitions)
rfxtrx.RFX_DEVICES[device_id] = new_light
lights.append(new_light)
add_devices_callback(lights)
def light_update(event):
"""Callback for light updates from the RFXtrx gateway."""
if not isinstance(event.device, rfxtrxmod.LightingDevice) or \
not event.device.known_to_be_dimmable:
return
# Add entity if not exist and the automatic_add is True
device_id = slugify(event.device.id_string.lower())
if device_id not in rfxtrx.RFX_DEVICES:
automatic_add = config.get('automatic_add', False)
if not automatic_add:
return
_LOGGER.info(
"Automatic add %s rfxtrx.light (Class: %s Sub: %s)",
device_id,
event.device.__class__.__name__,
event.device.subtype
)
pkt_id = " | ".join("{0:02x}".format(x) for x in event.data)
entity_name = "%s : %s" % (device_id, pkt_id)
datas = {ATTR_STATE: False, ATTR_FIREEVENT: False}
signal_repetitions = config.get('signal_repetitions',
SIGNAL_REPETITIONS)
new_light = RfxtrxLight(entity_name, event, datas,
signal_repetitions)
rfxtrx.RFX_DEVICE | S[device_id] = new_light
add_devices_callback([new_light])
# Check if entity exists or previously added automatically
if device_id in rfxtrx.RFX_DEVICES:
_LOGGER.debug(
"EntityID: %s light_update. Command: %s",
device_id,
event.values['Command']
)
if event.values['Command'] == 'On'\
or event.values['Command'] == 'Off':
# Update the rfxtrx device state
is_on = event.values['Command'] == 'On'
# pylint: disable=protected-access
rfxtrx.RFX_DEVICES[device_id]._state = is_on
rfxtrx.RFX_DEVICES[device_id].update_ha_state()
elif event.values['Command'] == 'Set level':
# pylint: disable=protected-access
rfxtrx.RFX_DEVICES[device_id]._brightness = \
(event.values['Dim level'] * 255 // 100)
# Update the rfxtrx device state
is_on = rfxtrx.RFX_DEVICES[device_id]._brightness > 0
rfxtrx.RFX_DEVICES[device_id]._state = is_on
rfxtrx.RFX_DEVICES[device_id].update_ha_state()
else:
return
# Fire event
if rfxtrx.RFX_DEVICES[device_id].should_fire_event:
rfxtrx.RFX_DEVICES[device_id].hass.bus.fire(
EVENT_BUTTON_PRESSED, {
ATTR_ENTITY_ID:
rfxtrx.RFX_DEVICES[device_id].entity_id,
ATTR_STATE: event.values['Command'].lower()
}
)
# Subscribe to main rfxtrx events
if light_update not in rfxtrx.RECEIVED_EVT_SUBSCRIBERS:
rfxtrx.RECEIVED_EVT_SUBSCRIBERS.append(light_update)
class RfxtrxLight(Light):
"""Represenation of a RFXtrx light."""
def __init__(self, name, event, datas, signal_repetitions):
"""Initialize the light."""
self._name = name
self._event = event
self._state = datas[ATTR_STATE]
self._should_fire_event = datas[ATTR_FIREEVENT]
self.signal_repetitions = signal_repetitions
self._brightness = 0
@property
def should_poll(self):
"""No polling needed for a light."""
return False
@property
def name(self):
"""Return the name of the light if any."""
return self._name
@property
def should_fire_event(self):
"""Return true if the device must fire event."""
return self._should_fire_event
@property
def is_on(self):
"""Return true if light is on."""
return self._state
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def assumed_state(self):
"""Return True if unable to access real state of entity."""
return True
def turn_on(self, **kwargs):
"""Turn the light on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
if not self._event:
return
if brightness is None:
self._brightness = 255
for _ in range(self.signal_repetitions):
self._event.device.send_on(rfxtrx.RFXOBJECT.transport)
else:
self._brightness = brightness
_brightness = (brightness * 100 // 255)
for _ in range(self.signal_repetitions):
self._event.device.send_dim(rfxtrx.RFXOBJECT.transport,
_brightness)
self._state = True
self.update_ha_state()
def turn_off(self, **kwargs):
"""Turn the light off."""
if not self._event:
return
for _ in range(self.signal_repetitions):
self._event.device.send_off(rfxtrx.RFXOBJECT.transport)
self._brightness = 0
self._state = False
self.update_ha_state()
|
poldracklab/mriqc | mriqc/classifier/sklearn/__init__.py | Python | bsd-3-clause | 1,178 | 0 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
from mriqc.classifier.sklearn._split import RobustLeavePGroupsO | ut
from mriqc.classifier.sklearn.cv_nested import ModelAndGridSearchCV
from mriqc.classifier.sklearn.p | arameters import ModelParameterGrid
__all__ = [
"ModelParameterGrid",
"ModelAndGridSearchCV",
"RobustLeavePGroupsOut",
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.