Dataset schema:

column     type           values / range
---------  -------------  --------------
repo_name  stringlengths  5-100
path       stringlengths  4-231
language   stringclasses  1 value
license    stringclasses  15 values
size       int64          6-947k
score      float64        0-0.34
prefix     stringlengths  0-8.16k
middle     stringlengths  3-512
suffix     stringlengths  0-8.17k
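For orientation, a minimal sketch of how records with this schema could be iterated and a fill-in-the-middle example reassembled, assuming a Hugging Face `datasets`-style layout; the dataset name below is a hypothetical placeholder, not given by this dump:

# Sketch: reassembling a FIM (fill-in-the-middle) record from this schema.
# "org/python-fim-corpus" is a hypothetical placeholder name.
from datasets import load_dataset

ds = load_dataset("org/python-fim-corpus", split="train")  # hypothetical name
for rec in ds.select(range(3)):
    # prefix + middle + suffix restores the original source span
    source = rec["prefix"] + rec["middle"] + rec["suffix"]
    print(rec["repo_name"], rec["path"], rec["license"], rec["score"], len(source))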
kobotoolbox/kobo_selenium_tests
kobo_selenium_tests/selenium_ide_exported/create_form_test_template.py
Python
gpl-3.0
7,253
0.011995
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re


class CreateFormTestTemplate(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(30)
        self.base_url = "http://kf.kbtdev.org/"
        self.verificationErrors = []
        self.accept_next_alert = True

    def test_create_form_test_template(self):
        driver = self.driver
        driver.get(self.base_url + "")
        for i in range(60):
            try:
                if self.is_element_present(By.CSS_SELECTOR, ".forms-header__title"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out")
        self.assertFalse(self.is_element_present(By.CSS_SELECTOR, ".forms__card"))
        self.assertTrue(self.is_element_present(By.CSS_SELECTOR, ".forms-empty__button"))
        driver.find_element_by_css_selector(".forms-empty__button").click()
        for i in range(60):
            try:
                if self.is_element_present(By.CSS_SELECTOR, ".forms__addform__start"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out")
        # Click the form creation button using JavaScript to avoid element not visible errors.
        # WARNING: The 'runScript' command doesn't export to python, so a manual edit is necessary.
        # ERROR: Caught exception [ERROR: Unsupported command [runScript | $(".forms__addform__start").click(); | ]]
        for i in range(60):
            try:
                if self.is_element_present(By.CSS_SELECTOR, ".form-title"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out")
        driver.find_element_by_css_selector(".form-title").click()
        for i in range(60):
            try:
                if self.is_element_present(By.CSS_SELECTOR, ".survey-header__title input"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out")
        driver.find_element_by_css_selector(".survey-header__title input").send_keys(Keys.SHIFT, Keys.END, Keys.SHIFT, Keys.DELETE)
        driver.find_element_by_css_selector(".survey-header__title input").send_keys("Selenium test form title.", Keys.ENTER)
        self.assertEqual("Selenium test form title.", driver.find_element_by_css_selector(".form-title").text)
        self.assertTrue(self.is_element_present(By.CSS_SELECTOR, ".survey-editor .fa-plus"))
        driver.find_element_by_css_selector(".survey-editor .fa-plus").click()
        for i in range(60):
            try:
                if self.is_element_present(By.CSS_SELECTOR, ".row__questiontypes__form > input"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out")
        driver.find_element_by_css_selector(".row__questiontypes__form > input").send_keys("Selenium test question label.", Keys.TAB)
        self.assertTrue(self.is_element_present(By.CSS_SELECTOR, ".row__questiontypes__form > button"))
        driver.find_element_by_css_selector(".row__questiontypes__form > button").click()
        for i in range(60):
            try:
                if self.is_element_present(By.CSS_SELECTOR, ".questiontypelist__item[data-menu-item=\"select_one\"]"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out")
        driver.find_element_by_css_selector(".questiontypelist__item[data-menu-item=\"select_one\"]").click()
        for i in range(60):
            try:
                if self.is_element_present(By.CSS_SELECTOR, ".card--selectquestion__expansion li:nth-child(1) span"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out")
        self.assertEqual("Selenium test question label.", driver.find_element_by_css_selector(".card__header-title").text)
        driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(1) .editable-wrapper span:first-child").click()
        for i in range(60):
            try:
                if self.is_element_present(By.CSS_SELECTOR, ".card--selectquestion__expansion li:nth-child(1) input"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out")
        driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(1) input").send_keys(Keys.SHIFT, Keys.END, Keys.SHIFT, Keys.DELETE)
        driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(1) input").send_keys("Selenium test question choice 1.", Keys.ENTER)
        self.assertEqual("Selenium test question choice 1.", driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(1) span").text)
        self.assertTrue(self.is_element_present(By.CSS_SELECTOR, ".card--selectquestion__expansion li:nth-child(2) span"))
        driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(2) span").click()
        for i in range(60):
            try:
                if self.is_element_present(By.CSS_SELECTOR, ".card--selectquestion__expansion li:nth-child(2) input"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out")
        driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(2) input").send_keys(Keys.SHIFT, Keys.END, Keys.SHIFT, Keys.DELETE)
        driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(2) input").send_keys("Selenium test question choice 2.", Keys.ENTER)
        self.assertEqual("Selenium test question choice 2.", driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(2) span").text)
        self.assertTrue(self.is_element_present(By.ID, "save"))
        driver.find_element_by_id("save").click()
        for i in range(60):
            try:
                if self.is_element_present(By.CSS_SELECTOR, ".forms__card__title"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out")
        self.assertEqual("Selenium test form title.", driver.find_element_by_css_selector(".forms__card__title").text)

    def is_element_present(self, how, what):
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException, e: return False
        return True

    def is_alert_present(self):
        try: self.driver.switch_to_alert()
        except NoAlertPresentException, e: return False
        return True

    def close_alert_and_get_its_text(self):
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True

    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)


if __name__ == "__main__":
    unittest.main()
IntelLabs/hpat
examples/series/rolling/series_rolling_median.py
Python
bsd-2-clause
1,804
0
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************

import pandas as pd
from numba import njit


@njit
def series_rolling_median():
    series = pd.Series([4, 3, 5, 2, 6])  # Series of 4, 3, 5, 2, 6
    out_series = series.rolling(3).median()

    return out_series  # Expect series of NaN, NaN, 4.0, 3.0, 5.0


print(series_rolling_median())
silkyar/570_Big_Little
build/ARM/mem/protocol/DMA_Controller.py
Python
bsd-3-clause
246
0.004065
from m5.params import *
from m5.SimObject import SimObject
from Controller import RubyController

class DMA_Controller(RubyController):
    type = 'DMA_Controller'
    dma_sequencer = Param.DMASequencer("")
    request_latency = Param.Int(6, "")
armstrong/armstrong.core.arm_access
armstrong/core/arm_access/tests/__init__.py
Python
apache-2.0
230
0
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)

from .access_memberships import *
from .fields import *
from .migrations import *
from .subscription import *
from .forms import *
from .widgets import *
Tesora/tesora-horizon
openstack_dashboard/contrib/trove/content/database_datastores/tabs.py
Python
apache-2.0
2,343
0
# Copyright 2015 Cloudwatt
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from django import template
from django.utils.translation import ugettext_lazy as _

from horizon import exceptions
from horizon import tabs

from openstack_dashboard.contrib.trove import api
from openstack_dashboard.contrib.trove.content.database_datastores \
    import tables


class OverviewTab(tabs.Tab):
    name = _("Overview")
    slug = "overview"

    def get_context_data(self, request):
        return {"datastore": self.tab_group.kwargs['datastore']}

    def get_template_name(self, request):
        template_dir = 'project/database_datastores/%s'
        datastore = self.tab_group.kwargs['datastore']
        template_file = '_detail_overview_%s.html' % datastore.name
        try:
            template.loader.get_template(template_file)
        except template.TemplateDoesNotExist:
            # This datastore type does not have a template file
            # Just use the base template file
            template_file = '_detail_overview.html'
        return template_dir % template_file


class VersionsTab(tabs.TableTab):
    table_classes = [tables.VersionsTable]
    name = _("Versions")
    slug = "versions_tab"
    template_name = "horizon/common/_detail_table.html"

    def get_versions_data(self):
        datastore = self.tab_group.kwargs['datastore']
        try:
            versions = api.trove.datastore_version_list(self.request,
                                                        datastore.id)
        except Exception:
            msg = _('Unable to get versions list.')
            exceptions.handle(self.request, msg)
            versions = []
        return versions


class DatastoreDetailTabs(tabs.TabGroup):
    slug = "datastore_detail"
    tabs = (OverviewTab, VersionsTab)
    sticky = True
pureqml/qmlcore
compiler/grammar2.py
Python
mit
21,146
0.03263
import re
from collections import OrderedDict
import compiler.lang as lang

doc_next = None
doc_prev_component = None
doc_root_component = None

class CustomParser(object):
    def match(self, next):
        raise Exception("Expression should implement match method")

escape_re = re.compile(r"[\0\n\r\v\t\b\f]")
escape_map = {
    '\0': '\\0',
    '\n': '\\n',
    '\r': '\\r',
    '\v': '\\v',
    '\t': '\\t',
    '\b': '\\b',
    '\f': '\\f'
}

def escape(str):
    return escape_re.sub(lambda m: escape_map[m.group(0)], str)

class StringParser(CustomParser):
    def match(self, next):
        n = len(next)
        if n < 2:
            return
        quote = next[0]
        if quote != "'" and quote != "\"":
            return
        pos = 1
        while next[pos] != quote:
            if next[pos] == "\\":
                pos += 2
            else:
                pos += 1
            if pos >= n:
                raise Exception("Unexpected EOF while parsing string")
        return next[:pos + 1]

skip_re = re.compile(r'(?:\s+|/\*.*?\*/|//[^\n]*(?:$|\n))', re.DOTALL)
COMPONENT_NAME = r'(?:[a-z][a-zA-Z0-9._]*\.)?[A-Z][A-Za-z0-9]*'
component_name = re.compile(COMPONENT_NAME)
component_name_lookahead = re.compile(COMPONENT_NAME + r'\s*{')
identifier_re = re.compile(r'[a-z_][A-Za-z0-9_]*')
property_type_re = re.compile(r'[a-z][a-z0-9]*', re.IGNORECASE)
nested_identifier_re = re.compile(r'[a-z_][A-Za-z0-9_\.]*')
function_name_re = re.compile(r'[a-z_][a-z0-9_\.]*', re.IGNORECASE)
string_re = StringParser()
kw_re = re.compile(r'(?:true|false|null)')
NUMBER_RE = r"(?:\d+\.\d+(e[+-]?\d+)?|(?:0x)?[0-9]+)"
number_re = re.compile(NUMBER_RE, re.IGNORECASE)
percent_number_re = re.compile(NUMBER_RE + r'%', re.IGNORECASE)
scale_number_re = re.compile(NUMBER_RE + r's', re.IGNORECASE)
rest_of_the_line_re = re.compile(r".*$", re.MULTILINE)
json_object_value_delimiter_re = re.compile(r"[,;]")
dep_var = re.compile(r"\${(.*?)}")

class Expression(object):
    __slots__ = ('op', 'args')

    def __init__(self, op, *args):
        self.op, self.args = op, args

    def __repr__(self):
        return "Expression %s { %s }" %(self.op, ", ".join(map(repr, self.args)))

    def __str__(self):
        args = self.args
        n = len(args)
        if n == 1:
            return "(%s %s)" %(self.op, args[0])
        elif n == 2:
            return "(%s %s %s)" %(args[0], self.op, args[1])
        elif n == 3:
            op = self.op
            return "(%s %s %s %s %s)" %(args[0], op[0], args[1], op[1], args[2])
        else:
            raise Exception("invalid argument counter")

class Call(object):
    __slots__ = ('func', 'args')

    def __init__(self, func, args):
        self.func = func
        self.args = args

    def __repr__(self):
        return "Call %s { %s }" %(self.func, self.args)

    def __str__(self):
        if isinstance(self.func, Literal):
            name = self.func.term
            if name[0].islower():
                if '.' in name:
                    name = '${%s}' %name
                else:
                    name = '$this._context.%s' %name
        else:
            name = str(self.func)
            #if lhs is not an literal, than we can't process deps, removing ${xxx}
            name = dep_var.sub(lambda m: m.group(1), name)
        return "%s(%s)" %(name, ",".join(map(str, self.args)))

class Dereference(object):
    __slots__ = ('array', 'index')

    def __init__(self, array, index):
        self.array = array
        self.index = index

    def __str__(self):
        return "(%s[%s])" %(self.array, self.index)

class Literal(object):
    __slots__ = ('lbp', 'term', 'identifier')

    def __init__(self, term, string=False, identifier=False):
        self.term = escape(term) if string else term
        self.lbp = 0
        self.identifier = identifier

    def nud(self, state):
        return self

    def __repr__(self):
        return "Literal { %s }" %self.term

    def __str__(self):
        return "${%s}" %self.term if self.identifier and self.term[0].islower() else self.term

class PrattParserState(object):
    def __init__(self, parent, parser, token):
        self.parent, self.parser, self.token = parent, parser, token

class PrattParser(object):
    def __init__(self, ops):
        symbols = [(x.term, x) for x in ops]
        symbols.sort(key=lambda x: len(x[0]), reverse=True)
        self.symbols = symbols

    def next(self, parser):
        parser._skip()
        next = parser.next
        next_n = len(next)
        for term, sym in self.symbols:
            n = len(term)
            if n > next_n:
                continue
            keyword = term[-1].isalnum()
            if next.startswith(term):
                if keyword and n < next_n and next[n].isalnum():
                    continue
                parser.advance(len(term))
                return sym
        next = parser.maybe(kw_re)
        if next:
            return Literal(next)
        next = parser.maybe(percent_number_re)
        if next:
            next = next[:-1]
            return Literal("((%s) / 100 * ${parent.<property-name>})" %next) if next != 100 else "(${parent.<property-name>})"
        next = parser.maybe(scale_number_re)
        if next:
            next = next[:-1]
            return Literal("((%s) * ${context.<scale-property-name>})" %next)
        next = parser.maybe(number_re)
        if next:
            return Literal(next)
        next = parser.maybe(function_name_re)
        if next:
            return Literal(next, identifier=True)
        next = parser.maybe(string_re)
        if next:
            return Literal(next, string=True)
        return None

    def advance(self, state, expect=None):
        if expect is not None:
            state.parser.read(expect, "Expected %s in expression" %expect)
        state.token = self.next(state.parser)

    def expression(self, state, rbp=0):
        parser = state.parser
        t = state.token
        state.token = self.next(parser)
        if state.token is None:
            return t
        left = t.nud(state)
        while state.token is not None and rbp < state.token.lbp:
            t = state.token
            self.advance(state)
            left = t.led(state, left)
        return left

    def parse(self, parser):
        token = self.next(parser)
        if token is None:
            parser.error("Unexpected expression")
        state = PrattParserState(self, parser, token)
        return self.expression(state)

class UnsupportedOperator(object):
    __slots__ = ('term', 'lbp', 'rbp')

    def __init__(self, term, lbp=0, rbp=0):
        self.term, self.lbp, self.rbp = term, lbp, rbp

    def nud(self, state):
        state.parser.error("Unsupported prefix operator %s" %self.term)

    def led(self, state, left):
        state.parser.error("Unsupported postfix operator %s" %self.term)

    def __repr__(self):
        return "UnsupportedOperator { %s %s }" %(self.term, self.lbp)

class Operator(object):
    __slots__ = ('term', 'lbp', 'rbp')

    def __init__(self, term, lbp=0, rbp=None):
        self.term, self.lbp, self.rbp = term, lbp, rbp

    def nud(self, state):
        if self.rbp is not None:
            return Expression(self.term, state.parent.expression(state, self.rbp))
        state.parser.error("Unexpected token in infix expression: '%s'" %self.term)

    def led(self, state, left):
        if self.lbp is not None:
            return Expression(self.term, left, state.parent.expression(state, self.lbp))
        else:
            state.parser.error("No left-associative operator defined")

    def __repr__(self):
        return "Operator { %s %s %s }" %(self.term, self.lbp, self.rbp)

class Conditional(object):
    __slots__ = ('term', 'lbp')

    def __init__(self, lbp):
        self.term = '?'
        self.lbp = lbp

    def nud(self, state):
        state.parser.error("Conditional operator can't be used as unary")

    def led(self, state, left):
        true = state.parent.expression(state)
        state.parent.advance(state, ':')
        false = state.parent.expression(state)
        return Expression(('?', ':'), left, true, false)

    def __repr__(self):
        return "Conditional { }"

class LeftParenthesis(object):
    __slots__ = ('term', 'lbp')

    def __init__(self, lbp):
        self.term = '('
        self.lbp = lbp

    def nud(self, state):
        expr = state.parent.expression(state)
        state.parent.advance(state, ')')
        return expr

    def led(self, state, left):
        args = []
        next = state.token
        if next.term != ')':
            while True:
                args.append(state.parent.expression(state))
                if state.token is not None:
                    state.parser.error("Unexpected token %s" %state.token)
                if not state.parser.maybe(','):
                    break
                state.parent.advance(state)
        state.parent.advance(state, ')')
        return Call(left, args)

    def __repr__(self):
        return "LeftParenthesis { %d }" %self.lbp

class LeftSquareBracket(object):
    __slots__ = ('term', 'lbp')

    def __init__(self, lbp):
        self.term = '['
        self.lbp = lbp

    def nud(self, state):
        state.parser.error("Invalid [] expression")

    def led(self, state, left):
        arg = state.parent.expression(state)
        if state.token is not None:
            state.parser.error("Unexpected token %s" %state.token)
        state.parent.adva
domenicosolazzo/jroc
jroc/nlp/wordnet/WordnetManager.py
Python
gpl-3.0
8,043
0.005098
# -*- coding: utf-8 -*-
import itertools

"""
Languages           | ShortCode | Wordnet
Albanian            | sq        | als
Arabic              | ar        | arb
Bulgarian           | bg        | bul
Catalan             | ca        | cat
Chinese             | zh        | cmn
Chinese (Taiwan)    | qn        | qcn
Greek               | el        | ell
Basque              | eu        | eus
Persian             | fa        | fas
Finnish             | fi        | fin
French              | fr        | fra
Galician            | gl        | glg
Hebrew              | he        | heb
Croatian            | hr        | hrv
Indonesian          | id        | ind
Italian             | it        | ita
Japanese            | ja        | jpn
Norwegian NyNorsk   | nn        | nno
Norwegian Bokmål    | nb/no     | nob
Polish              | pl        | pol
Portuguese          | pt        | por
Slovenian           | sl        | slv
Spanish             | es        | spa
Swedish             | sv        | swe
Thai                | tt        | tha
Malay               | ms        | zsm
"""

""" Language short codes => Wordnet Code """
AVAILABLE_LANGUAGES = dict([
    ('sq', 'als'), ('ar', 'arb'), ('bg', 'bul'), ('ca', 'cat'), ('da', 'dan'),
    ('zh', 'cmn'), ('el', 'ell'), ('eu', 'eus'), ('fa', 'fas'), ('fi', 'fin'),
    ('fr', 'fra'), ('gl', 'glg'), ('he', 'heb'), ('hr', 'hrv'), ('id', 'ind'),
    ('it', 'ita'), ('ja', 'jpn'), ('nn', 'nno'), ('nb', 'nob'), ('no', 'nob'),
    ('pl', 'pol'), ('pt', 'por'), ('qn', 'qcn'), ('sl', 'slv'), ('es', 'spa'),
    ('sv', 'swe'), ('tt', 'tha'), ('ms', 'zsm'), ('en', 'eng')])

""" Language names => Short Code """
AVAILABLE_LANGUAGES_NAMES = dict([
    ('albanian', 'sq'), ('arabic', 'ar'), ('bulgarian', 'bg'), ('catalan', 'ca'),
    ('danish', 'da'), ('chinese', 'zh'), ('basque', 'eu'), ('persian', 'fa'),
    ('finnish', 'fi'), ('france', 'fr'), ('galician', 'gl'), ('hebrew', 'he'),
    ('croatian', 'hr'), ('indonesian', 'id'), ('italian', 'it'), ('japanese', 'ja'),
    ('norwegian_nynorsk', 'nn'), ('norwegian', 'no'), ('norwegian_bokmal', 'nb'),
    ('polish', 'pl'), ('portuguese', 'pt'), ('slovenian', 'sl'), ('spanish', 'es'),
    ('swedish', 'sv'), ('thai', 'tt'), ('malay', 'ms'), ('english', 'en')])


class WordnetManager(object):

    def __init__(self, language="en"):
        """
        Constructor for the wordnet manager. It takes a main language.
        """
        self.__language = language

    def __isLanguageAvailable(self, code=None, language_name=None):
        """
        Check if a language is available
        """
        if code is None and language_name is None:
            raise Exception("Error evaluating the correct language")

        if code is not None and code.lower() in AVAILABLE_LANGUAGES:
            return True

        if language_name is not None and language_name.lower() in AVAILABLE_LANGUAGES_NAMES:
            return True

        return False

    def __nameToWordnetCode(self, name):
        """
        It returns the wordnet code for a given language name
        """
        if not self.__isLanguageAvailable(language_name=name):
            raise Exception("Wordnet code not found for the language name %s " % name)

        name = name.lower()
        languageShortCode = AVAILABLE_LANGUAGES_NAMES[name]
        wordnetCode = self.__shortCodeToWordnetCode(shortCode=languageShortCode)
        return wordnetCode

    def __shortCodeToWordnetCode(self, shortCode):
        """
        It returns the wordnet code from a given language short code
        """
        if not self.__isLanguageAvailable(code=shortCode):
            raise Exception("Wordnet code not found for the language short code %s " % shortCode)

        code = shortCode.lower()
        wordnetCode = AVAILABLE_LANGUAGES[code]
        return wordnetCode

    def __getSynsets(self, word, wordNetCode):
        """
        It returns the synsets given both word and language code
        """
        from nltk.corpus import wordnet as wn
        synsets = wn.synsets(word, lang=wordNetCode)
        return synsets

    def getLemmas(self, word, languageCode="en"):
        """
        Get the lemmas for a given word

        :word: The word
        :languageCode: The language for a given lemma
        """
        wnCode = self.__shortCodeToWordnetCode(shortCode=languageCode)
        synsets = self.__getSynsets(word, wnCode)  # wn.synsets(word, lang=wnCode)
        lemmas = dict([('en', [])])
        for synset in synsets:
            enLemmas = synset.lemma_names()
            lemmas['en'].extend(enLemmas)
            if languageCode != "en" and self.__isLanguageAvailable(code=languageCode):
                langLemmas = list(sorted(set(synset.lemma_names(lang=wnCode))))
                lemmas[languageCode] = langLemmas

        lemmas['en'] = list(sorted(set(lemmas.get('en', []))))
        return lemmas

    def getSynonyms(self, words=[], language_code="en"):
        """
        Get the synonyms from a list of words.

        :words: A list of words
        :language_code: the language for the synonyms.
        """
        if words is None or not isinstance(words, list) or len(words) <= 0:
            return []

        if not self.__isLanguageAvailable(code=language_code):
            return []

        wnCode = self.__shortCodeToWordnetCode(language_code)

        result = {}
        for word in words:
            result[word] = dict([('lemmas', self.getLemmas(word, languageCode=language_code))])
        return result

    def getHyponyms(self, words, language_code="en"):
        """
        Get specific synsets from a given synset
        """
        wnCode = self.__shortCodeToWordnetCode(language_code)
        result = {}
        for word in words:
            synonyms = self.__getSynsets(word, wnCode)
            hyponyms = [hyp for synset in synonyms for hyp in synset.hyponyms()]
            engLemmas = [hyp.lemma_names() for hyp in hyponyms]
            lemmas = dict([('en', list(sorted(set(itertools.chain.from_iterable(engLemmas)), key=lambda s: s.lower())))])
            if language_code != "en":
                languageLemmas = [hyp.lemma_names(lang=wnCode) for hyp in hyponyms]
                languageLemmas = list(sorted(set(itertools.chain.from_iterable(languageLemmas)), key=lambda s: s.lower()))
                lemmas[language_code] = languageLemmas
            result[word] = dict([('lemmas', lemmas), ('language', language_code)])
        return result

    def getHypernyms(self, words, language_code="en"):
        """
        Get general synsets from a given synset
        """
        wnCode = self.__shortCodeToWordnetCode(language_code)
        result = {}
        for word in words:
            synonyms = self.__getSynsets(word, wnCode)
            hypernyms = [hyp for synset in synonyms for hyp in synset.hypernyms()]
            engLemmas = [hyp.lemma_names() for hyp in hypernyms]
            lemmas = dict([('en', list(sorted(set(itertools.chain.from_iterable(engLemmas)), key=lambda s: s.lower())))])
            if language_code != "en":
                languageLemmas = [hyp.lemma_names(lang=wnCode) for hyp in hypernyms]
                languageLemmas = list(sorted(set(itertools.chain.from_iterable(languageLemmas)), key=lambda s: s.lower()))
                lemmas[language_code] = languageLemmas
            result[word] = dict([('lemmas', lemmas), ('language', language_code)])
        return result
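A short usage sketch of the class above, assuming the NLTK WordNet and Open Multilingual Wordnet corpora are installed (e.g. via nltk.download('wordnet') and nltk.download('omw')); the word "dog" is illustrative:

# Usage sketch for WordnetManager (requires nltk wordnet + omw corpora).
manager = WordnetManager(language="en")
print(manager.getLemmas("dog", languageCode="it"))       # English + Italian lemmas
print(manager.getSynonyms(["dog"], language_code="en"))  # {'dog': {'lemmas': {...}}}
print(manager.getHypernyms(["dog"], language_code="en"))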
alfa-jor/addon
plugin.video.alfa/channels/porndish.py
Python
gpl-3.0
3,595
0.013634
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys

from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools

host = 'https://www.porndish.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Nuevos", action="lista", url=host))
    itemlist.append(Item(channel=item.channel, title="Canal", action="categorias", url=host))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
    patron = '<li id="menu-item-\d+".*?'
    patron += '<a href="([^"]+)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        scrapedplot = ""
        scrapedurl = urlparse.urljoin(item.url, scrapedurl)
        scrapedthumbnail = ""
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot))
    return itemlist


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
    data = scrapertools.find_single_match(data, 'archive-body">(.*?)<div class="g1-row g1-row-layout-page g1-prefooter">')
    patron = '<article class=.*?'
    patron += 'src="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<a href="([^"]+)" rel="bookmark">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
                             fanart=thumbnail, thumbnail=thumbnail, plot=plot, contentTitle=scrapedtitle))
    next_page = scrapertools.find_single_match(data, '<a class="g1-delta g1-delta-1st next" href="([^"]+)">Next</a>')
    if next_page:
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<iframe src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url in matches:
        itemlist.append(item.clone(action="play", title="%s", contentTitle=item.title, url=url))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    a = len(itemlist)
    for i in itemlist:
        if a < 1:
            return []
        res = servertools.check_video_link(i.url, i.server, timeout=5)
        a -= 1
        if 'green' in res:
            return [i]
        else:
            continue
kkdang/synapsePythonClient
synapseclient/cache.py
Python
apache-2.0
11,025
0.004444
# Note: Even though this has Sphinx format, this is not meant to be part of the public docs
"""
************
File Caching
************

Implements a cache on local disk for Synapse file entities and other objects
with a `FileHandle <https://rest.synapse.org/org/sagebionetworks/repo/model/file/FileHandle.html>`_.
This is part of the internal implementation of the client and should not be
accessed directly by users of the client.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str

import collections
import datetime
import json
import operator
import os
import re
import shutil
import six
from math import floor

import synapseclient.utils as utils
from synapseclient.lock import Lock
from synapseclient.exceptions import *

CACHE_ROOT_DIR = os.path.join('~', '.synapseCache')


def epoch_time_to_iso(epoch_time):
    """
    Convert seconds since unix epoch to a string in ISO format
    """
    return None if epoch_time is None else utils.datetime_to_iso(utils.from_unix_epoch_time_secs(epoch_time))


def iso_time_to_epoch(iso_time):
    """
    Convert an ISO formatted time into seconds since unix epoch
    """
    return None if iso_time is None else utils.to_unix_epoch_time_secs(utils.iso_to_datetime(iso_time))


def compare_timestamps(modified_time, cached_time):
    """
    Compare two ISO formatted timestamps, with a special case when cached_time
    ends in .000Z.

    For backward compatibility, we always write .000 for milliseconds into the
    cache. We then match a cached time ending in .000Z, meaning zero
    milliseconds, with a modified time with any number of milliseconds.

    :param modified_time: float representing seconds since unix epoch
    :param cached_time: string holding a ISO formatted time
    """
    if cached_time is None or modified_time is None:
        return False
    if cached_time.endswith(".000Z"):
        return cached_time == epoch_time_to_iso(floor(modified_time))
    else:
        return cached_time == epoch_time_to_iso(modified_time)


def _get_modified_time(path):
    if os.path.exists(path):
        return os.path.getmtime(path)
    return None


class Cache():
    """
    Represent a cache in which files are accessed by file handle ID.
    """

    def __init__(self, cache_root_dir=CACHE_ROOT_DIR, fanout=1000):
        ## set root dir of cache in which meta data will be stored and files
        ## will be stored here by default, but other locations can be specified
        cache_root_dir = os.path.expanduser(cache_root_dir)
        if not os.path.exists(cache_root_dir):
            os.makedirs(cache_root_dir)
        self.cache_root_dir = cache_root_dir
        self.fanout = fanout
        self.cache_map_file_name = ".cacheMap"

    def get_cache_dir(self, file_handle_id):
        if isinstance(file_handle_id, collections.Mapping):
            if 'dataFileHandleId' in file_handle_id:
                file_handle_id = file_handle_id['dataFileHandleId']
            elif 'concreteType' in file_handle_id and 'id' in file_handle_id and file_handle_id['concreteType'].startswith('org.sagebionetworks.repo.model.file'):
                file_handle_id = file_handle_id['id']
        return os.path.join(self.cache_root_dir, str(int(file_handle_id) % self.fanout), str(file_handle_id))

    def _read_cache_map(self, cache_dir):
        cache_map_file = os.path.join(cache_dir, self.cache_map_file_name)
        if not os.path.exists(cache_map_file):
            return {}
        with open(cache_map_file, 'r') as f:
            cache_map = json.load(f)
        return cache_map

    def _write_cache_map(self, cache_dir, cache_map):
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        cache_map_file = os.path.join(cache_dir, self.cache_map_file_name)
        with open(cache_map_file, 'w') as f:
            json.dump(cache_map, f)
            f.write('\n')  # For compatibility with R's JSON parser

    def contains(self, file_handle_id, path):
        """
        Given a file and file_handle_id, return True if an unmodified cached
        copy of the file exists at the exact path given or False otherwise.

        :param file_handle_id:
        :param path: file path at which to look for a cached copy
        """
        cache_dir = self.get_cache_dir(file_handle_id)
        if not os.path.exists(cache_dir):
            return False
        with Lock(self.cache_map_file_name, dir=cache_dir):
            cache_map = self._read_cache_map(cache_dir)
            path = utils.normalize_path(path)
            cached_time = cache_map.get(path, None)
            if cached_time:
                return True if compare_timestamps(_get_modified_time(path), cached_time) else False

    def get(self, file_handle_id, path=None):
        """
        Retrieve a file with the given file handle from the cache.

        :param file_handle_id:
        :param path: If the given path is None, look for a cached copy of the
                     file in the cache directory. If the path is a directory,
                     look there for a cached copy. If a full file-path is
                     given, only check whether that exact file exists and is
                     unmodified since it was cached.

        :returns: Either a file path, if an unmodified cached copy of the file
                  exists in the specified location or None if it does not
        """
        cache_dir = self.get_cache_dir(file_handle_id)
        if not os.path.exists(cache_dir):
            return None

        with Lock(self.cache_map_file_name, dir=cache_dir):
            cache_map = self._read_cache_map(cache_dir)
            path = utils.normalize_path(path)

            ## If the caller specifies a path and that path exists in the cache
            ## but has been modified, we need to indicate no match by returning
            ## None. The logic for updating a synapse entity depends on this to
            ## determine the need to upload a new file.

            if path is not None:
                ## If we're given a path to a directory, look for a cached file in that directory
                if os.path.isdir(path):
                    for cached_file_path, cached_time in six.iteritems(cache_map):
                        if path == os.path.dirname(cached_file_path):
                            return cached_file_path if compare_timestamps(_get_modified_time(cached_file_path), cached_time) else None
                ## if we're given a full file path, look up a matching file in the cache
                else:
                    cached_time = cache_map.get(path, None)
                    if cached_time:
                        return path if compare_timestamps(_get_modified_time(path), cached_time) else None

            ## return most recently cached and unmodified file OR
            ## None if there are no unmodified files
            for cached_file_path, cached_time in sorted(cache_map.items(), key=operator.itemgetter(1), reverse=True):
                if compare_timestamps(_get_modified_time(cached_file_path), cached_time):
                    return cached_file_path

            return None

    def add(self, file_handle_id, path):
        """
        Add a file to the cache
        """
        if not path or not os.path.exists(path):
            raise ValueError("Can't find file \"%s\"" % path)

        cache_dir = self.get_cache_dir(file_handle_id)
        with Lock(self.cache_map_file_name, dir=cache_dir):
            cache_map = self._read_cache_map(cache_dir)

            path = utils.normalize_path(path)
            ## write .000 milliseconds for backward compatibility
            cache_map[path] = epoch_time_to_iso(floor(_get_modified_time(path)))

            self._write_cache_map(cache_dir, cache_map)

        return cache_map

    def remove(self, file_handle_id, path=None, delete=None):
        """
        Remove a file from the cache.

        :param file_handle_id: Will also extract file handle id from either a File or file handle
        :param path: If the give
dana-i2cat/felix
optin_manager/src/python/openflow/optin_manager/urls.py
Python
apache-2.0
2,782
0.006111
from django.conf.urls.defaults import *
from django.contrib import admin
from django.conf import settings
from expedient.common.rpc4django.utils import rpc_url
from openflow.common.utils.OptinThemeManager import OptinThemeManager

OptinThemeManager.initialize()

admin.autodiscover()

urlpatterns = patterns('',
    (r'^$', 'openflow.optin_manager.users.views.index'),
    url(r'^dashboard$', 'openflow.optin_manager.users.views.dashboard', name="dashboard"),
    url(r'^change_profile$', 'openflow.optin_manager.users.views.change_profile', name="change_profile"),
    (r'^controls/', include('openflow.optin_manager.controls.urls')),
    (r'^opts/', include('openflow.optin_manager.opts.urls')),
    (r'^admin_manager/', include('openflow.optin_manager.admin_manager.urls')),
    (r'^xmlrpc/', include('openflow.optin_manager.xmlrpc_server.urls')),
    # For testing
    (r'^dummyfv/', include('openflow.optin_manager.dummyfv.urls')),
    (r'^admin/', include(admin.site.urls)),
    (r'^accounts/', include('registration.urls')),
    # sfa
    rpc_url(r'^xmlrpc/sfa/?$', name='optin_sfa'),
    rpc_url(r'^xmlrpc/geni/3/?$', name='gapi3')
)

#static_file_tuple = (r'^%s/(?P<path>.*)$' % settings.MEDIA_URL[1:],
#                     'django.views.static.serve',
#                     {'document_root': "%s" % settings.MEDIA_ROOT})
#static_js_tuple = (r'^%s/(?P<path>.*)$' % str(settings.MEDIA_URL[1:]+"/js/"),
#                   'django.views.static.serve',
#                   {'document_root': "%s" % settings.MEDIA_ROOT})
#urlpatterns += patterns('',
#    # TODO: Serve static content, should be removed in production deployment
#    # serve from another domain to speed up connections (no cookies needed)
#    url(*static_file_tuple, name="img_media"),
#    url(*static_file_tuple, name="css_media"),
#    url(*static_js_tuple, name="js_media"),)

def get_static_url(name, path=""):
    static_file_tuple = (
        r'^%s%s/(?P<path>.*)$' % (settings.MEDIA_URL[1:], path),
        'django.views.static.serve',
        {'document_root': "%s%s" % (settings.MEDIA_ROOT, path)})
    return url(*static_file_tuple, name=name)

''' Static content '''
urlpatterns += patterns('',
    get_static_url("img_media", "/default"),
    get_static_url("css_media", "/default"),
    get_static_url("js_media", "/default/js"),
)

''' Static theme content '''
img_theme_tuple = OptinThemeManager.getStaticThemeTuple("img_media")
css_theme_tuple = OptinThemeManager.getStaticThemeTuple("css_media")
js_theme_tuple = OptinThemeManager.getStaticThemeTuple("js_media")

urlpatterns += patterns('',
    get_static_url(img_theme_tuple[0], img_theme_tuple[1]),
    get_static_url(css_theme_tuple[0], css_theme_tuple[1]),
    get_static_url(js_theme_tuple[0], js_theme_tuple[1]),
)
eduNEXT/edx-platform
lms/djangoapps/mobile_api/utils.py
Python
agpl-3.0
223
0
""" Comm
on utility methods for Mobile APIs. """ API_V05 = 'v0.5' API_V1 = 'v1' def parsed_version(version): """ Converts string X.X.X.Y to int tuple (X, X, X) """ retur
n tuple(map(int, (version.split(".")[:3])))
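A quick check of the documented behavior, following directly from the split-and-truncate logic above (any fourth component is dropped):

# Doctest-style check of parsed_version, per its docstring:
assert parsed_version("0.5.1") == (0, 5, 1)
assert parsed_version("1.2.3.4") == (1, 2, 3)  # fourth component ignored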
h2oai/h2o-3
h2o-bindings/bin/custom/python/gen_psvm.py
Python
apache-2.0
7,069
0.003537
examples = dict(
    disable_training_metrics="""
>>> from h2o.estimators import H2OSupportVectorMachineEstimator
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(gamma=0.01,
...                                        rank_ratio=0.1,
...                                        disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.mse()
""",
    fact_threshold="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(disable_training_metrics=False,
...                                        fact_threshold=1e-7)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.mse()
""",
    feasible_threshold="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(disable_training_metrics=False,
...                                        fact_threshold=1e-7)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.mse()
""",
    gamma="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(gamma=0.01,
...                                        rank_ratio=0.1,
...                                        disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.mse()
""",
    hyper_param="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(gamma=0.01,
...                                        rank_ratio=0.1,
...                                        hyper_param=0.01,
...                                        disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.mse()
""",
    ignore_const_cols="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(gamma=0.01,
...                                        rank_ratio=0.1,
...                                        ignore_const_cols=False,
...                                        disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.mse()
""",
    kernel_type="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(gamma=0.1,
...                                        rank_ratio=0.1,
...                                        hyper_param=0.01,
...                                        kernel_type="gaussian",
...                                        disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.mse()
""",
    max_iterations="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(gamma=0.1,
...                                        rank_ratio=0.1,
...                                        hyper_param=0.01,
...                                        max_iterations=20,
...                                        disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.mse()
""",
    mu_factor="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(gamma=0.1,
...                                        mu_factor=100.5,
...                                        disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.mse()
""",
    negative_weight="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(gamma=0.1,
...                                        rank_ratio=0.1,
...                                        negative_weight=10,
...                                        disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.mse()
""",
    positive_weight="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(gamma=0.1,
...                                        rank_ratio=0.1,
...                                        positive_weight=0.1,
...                                        disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.mse()
""",
    rank_ratio="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(gamma=0.01,
...                                        rank_ratio=0.1,
...                                        disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.mse()
""",
    seed="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(gamma=0.1,
...                                        rank_ratio=0.1,
...                                        seed=1234,
...                                        disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.model_performance
""",
    surrogate_gap_threshold="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(gamma=0.01,
...                                        rank_ratio=0.1,
...                                        surrogate_gap_threshold=0.1,
...                                        disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.mse()
""",
    sv_threshold="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> svm = H2OSupportVectorMachineEstimator(gamma=0.01,
...                                        rank_ratio=0.1,
...                                        sv_threshold=0.01,
...                                        disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=splice)
>>> svm.mse()
""",
    training_frame="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> train, valid = splice.split_frame(ratios=[0.8])
>>> svm = H2OSupportVectorMachineEstimator(disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=train)
>>> svm.mse()
""",
    validation_frame="""
>>> splice = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/splice/splice.svm")
>>> train, valid = splice.split_frame(ratios=[0.8])
>>> svm = H2OSupportVectorMachineEstimator(disable_training_metrics=False)
>>> svm.train(y="C1", training_frame=train, validation_frame=valid)
>>> svm.mse()
"""
)
adrianmoisey/github3.py
tests/unit/helper.py
Python
bsd-3-clause
1,888
0
try:
    from unittest import mock
except ImportError:
    import mock

import github3
import unittest


def build_url(self, *args, **kwargs):
    # We want to assert what is happening with the actual calls to the
    # Internet. We can proxy this.
    return github3.session.GitHubSession().build_url(*args, **kwargs)


class UnitHelper(unittest.TestCase):
    # Sub-classes must assign the class to this during definition
    described_class = None
    # Sub-classes must also assign a dictionary to this during definition
    example_data = {}

    def create_mocked_session(self):
        MockedSession = mock.create_autospec(github3.session.GitHubSession)
        return MockedSession()

    def create_session_mock(self, *args):
        session = self.create_mocked_session()
        base_attrs = ['headers', 'auth']
        attrs = dict(
            (key, mock.Mock()) for key in set(args).union(base_attrs)
        )
        session.configure_mock(**attrs)
        session.delete.return_value = None
        session.get.return_value = None
        session.patch.return_value = None
        session.post.return_value = None
        session.put.return_value = None
        return session

    def create_instance_of_described_class(self):
        if self.example_data:
            instance = self.described_class(self.example_data, self.session)
        else:
            instance = self.described_class()
            instance._session = self.session
        return instance

    def setUp(self):
        self.session = self.create_session_mock()
        self.instance = self.create_instance_of_described_class()
        # Proxy the build_url method to the class so it can build the URL and
        # we can assert things about the call that will be attempted to the
        # internet
        self.described_class._build_url = build_url
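A sketch of how a test module would consume this helper; the class under test and its payload below are hypothetical, chosen only to illustrate how described_class and example_data are bound:

# Hypothetical consumer of UnitHelper; the Repository class and example
# payload are illustrative, not taken from the real test suite.
class TestRepository(UnitHelper):
    described_class = github3.repos.Repository
    example_data = {'name': 'Hello-World', 'full_name': 'octocat/Hello-World'}

    def test_instance_was_built(self):
        # setUp() constructed self.instance from example_data with a mocked session
        assert self.instance is not None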
JulyKikuAkita/PythonPrac
cs15211/BalancedBinaryTree.py
Python
apache-2.0
5,434
0.009201
__source__ = 'https://leetcode.com/problems/balanced-binary-tree/#/description'
# https://github.com/kamyu104/LeetCode/blob/master/Python/balanced-binary-tree.py
# Time:  O(n)
# Space: O(h), h is height of binary tree
# divide and conquer
#
# Description: Leetcode # 110. Balanced Binary Tree
#
# Given a binary tree, determine if it is height-balanced.
#
# For this problem, a height-balanced binary tree is defined as a binary tree
# in which the depth of the two subtrees of every node never differ by more than 1.
#
# Companies
# Bloomberg
# Related Topics
# Tree Depth-first Search
# Similar Questions
# Maximum Depth of Binary Tree
#
import unittest

# Definition for a binary tree node
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

class Solution:
    # @param root, a tree node
    # @return a boolean
    def isBalanced(self, root):
        return (self.getHeight(root) >= 0)

    def getHeight(self, root):
        if root is None:
            return 0
        left_height = self.getHeight(root.left)
        right_height = self.getHeight(root.right)
        if left_height < 0 or right_height < 0 or abs(left_height - right_height) > 1:
            return -1
        return max(left_height, right_height) + 1

#http://www.programcreek.com/2013/02/leetcode-balanced-binary-tree-java/
class javaSolution:
    # @param root, a tree node
    # @return a boolean
    def isBalanced(self, root):
        if not root:
            return None
        if self.getHeight(root) == -1:
            return False
        return True

    def getHeight(self, root):
        if not root:
            return 0
        left = self.getHeight(root.left)
        right = self.getHeight(root.right)
        if left == -1 or right == -1:
            return -1
        if abs(left - right) > 1:
            return -1
        return max(left, right) + 1

class SolutionOther:
    # @param root, a tree node
    # @return a boolean
    # http://www.cnblogs.com/zuoyuan/p/3720169.html
    def isBalanced(self, root):
        if root == None:
            return True
        if abs(self.Height(root.left) - self.Height(root.right)) <= 1:
            return self.isBalanced(root.left) and self.isBalanced(root.right)
        else:
            return False

    def Height(self, root):
        if root == None:
            return 0
        return max(self.Height(root.left), self.Height(root.right)) + 1

#############test
#creating BST tree ####
root0 = TreeNode(0)
tree1 = TreeNode(1)
tree2 = TreeNode(2)
tree3 = TreeNode(3)
tree4 = TreeNode(4)
tree5 = TreeNode(5)
tree6 = TreeNode(6)
root0.left = tree1
#root0.right = tree2
tree1.left = tree3
tree1.right = tree4
tree2.left = tree5
#tree2.right = tree6
#end of creating BST tree ####

#test
test = SolutionOther()
print test.isBalanced(root0)
#print test.isBalanced3(root0)
#print test.isBalanced2(root0)

class TestMethods(unittest.TestCase):
    def test_Local(self):
        self.assertEqual(1, 1)
        root = TreeNode(0)
        root.left = TreeNode(1)
        result = Solution().isBalanced(root)
        print result
        root.left.left = TreeNode(2)
        result = javaSolution().isBalanced(root)
        print result

if __name__ == '__main__':
    unittest.main()

Java = '''
#Thought: https://leetcode.com/problems/contains-duplicate/solution/

This problem is generally believed to have two solutions:
the top down approach and the bottom up way.

DFS
1) The first method checks whether the tree is balanced strictly according to the
definition of balanced binary tree: the difference between the heights of the two
sub trees are not bigger than 1, and both the left sub tree and right sub tree are
also balanced. With the helper function depth(), we could easily write the code.

For the current node root, calling depth() for its left and right children actually
has to access all of its children, thus the complexity is O(N). We do this for each
node in the tree, so the overall complexity of isBalanced will be O(N^2).
This is a top down approach.

DFS
2) The second method is based on DFS. Instead of calling depth() explicitly for
each child node, we return the height of the current node in DFS recursion.
When the sub tree of the current node (inclusive) is balanced, the function
dfsHeight() returns a non-negative value as the height. Otherwise -1 is returned.
According to the leftHeight and rightHeight of the two children, the parent node
could check if the sub tree is balanced, and decides its return value.

# DFS
# 87.89% 1ms
class Solution {
    public boolean isBalanced(TreeNode root) {
        return dfsHeight(root) != -1;
    }

    public int dfsHeight(TreeNode root) {
        if (root == null) return 0;
        int left = dfsHeight(root.left);
        int right = dfsHeight(root.right);
        if (left == -1 || right == -1 || Math.abs(left - right) > 1) return -1;
        return Math.max(left, right) + 1;
    }
}

# DFS
# 87.89% 1ms
class Solution {
    public boolean isBalanced(TreeNode root) {
        if (root == null) return true;
        int left = getDpeth(root.left);
        int right = getDpeth(root.right);
        return Math.abs(left - right) <= 1
            && isBalanced(root.left)
            && isBalanced(root.right);
    }

    public int getDpeth(TreeNode root) {
        if (root == null) return 0;
        return Math.max(getDpeth(root.left), getDpeth(root.right)) + 1;
    }
}
'''
acutesoftware/worldbuild
tests/test_crafting.py
Python
gpl-2.0
1,300
0.013077
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# test_crafting.py

import os
import sys
import unittest

root_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + "..")
pth = root_folder  #+ os.sep + 'worldbuild'
sys.path.append(pth)

from worldbuild.crafting import craft as mod_craft

class TestTemplate(unittest.TestCase):
    def setUp(self):
        unittest.TestCase.setUp(self)

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def test_01_recipe(self):
        res = mod_craft.Recipe('1', 'new recipe', '20', 'mix')
        #print(res)
        self.assertEqual(str(res), 'new recipe')

    def test_02_dataset_recipe(self):
        recipes = mod_craft.DataSet(mod_craft.Recipe, mod_craft.get_fullname('recipes.csv'))
        self.assertTrue(len(recipes.object_list) > 18)
        tot_time_to_build = 0
        for recipe in recipes.object_list:
            #print(recipe)
            tot_time_to_build += int(recipe.base_time_to_build)
        #print('total time to build all recipes = ' + str(tot_time_to_build))
        self.assertEqual(str(recipes.object_list[0]), 'Torch')
        self.assertEqual(str(recipes.object_list[1]), 'Wooden Plank')
        self.assertTrue(tot_time_to_build > 10)

if __name__ == '__main__':
    unittest.main()
zstackorg/zstack-woodpecker
integrationtest/vm/virtualrouter/vlan/test_chg_instance_offering_online2.py
Python
apache-2.0
2,179
0.010096
'''
@author: Quarkonics
'''
import os
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vm_operations as vm_ops

test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()

def test():
    test_util.test_dsc('Test VM online change instance offering')
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    l3_net_list = [l3_net_uuid]

    cpuNum = 2
    memorySize = 666 * 1024 * 1024
    new_offering = test_lib.lib_create_instance_offering(cpuNum = cpuNum, \
            memorySize = memorySize)
    test_obj_dict.add_instance_offering(new_offering)
    new_offering_uuid = new_offering.uuid
    vm = test_stub.create_vm(l3_net_list, image_uuid, 'online_chg_offering_vm',
            instance_offering_uuid=new_offering_uuid,
            system_tags=['instanceOfferingOnlinechange::true'])
    test_obj_dict.add_vm(vm)

    cpuNum = 1
    memorySize = 222 * 1024 * 1024
    new_offering2 = test_lib.lib_create_instance_offering(cpuNum = cpuNum, \
            memorySize = memorySize)
    test_obj_dict.add_instance_offering(new_offering2)
    new_offering_uuid2 = new_offering2.uuid
    vm.change_instance_offering(new_offering_uuid2)

    cpuNum = 1
    memorySize = 444 * 1024 * 1024
    new_offering3 = test_lib.lib_create_instance_offering(cpuNum = cpuNum, \
            memorySize = memorySize)
    test_obj_dict.add_instance_offering(new_offering3)
    new_offering_uuid3 = new_offering3.uuid
    vm.change_instance_offering(new_offering_uuid3)

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('VM online change instance offering Test Pass')

#Will be called only if exception happens in test().
def error_cleanup():
    test_lib.lib_error_cleanup(test_obj_dict)
adambreznicky/python
DanMan/IRI_v5_fixer.py
Python
mit
11,750
0.002128
__file__ = 'IRI_v1'
__date__ = '5/15/2014'
__author__ = 'ABREZNIC'

import arcpy, os, datetime, csv, tpp

now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
today = curYear + "_" + curMonth + "_" + curDay

input = arcpy.GetParameterAsText(0)
calRhino = arcpy.GetParameterAsText(1)
output = arcpy.GetParameterAsText(2)
# theMXD = "C:\\TxDOT\\Projects\\IRI_dan\\working\\Untitled.mxd"
inputlist = [input]
inputcntr = 1
lengthinput = len(inputlist)
issuesReport = [["DISTRICT_FILE", "ROUTE_ID", "BEGIN_POINT", "END_POINT", "SECTION_LENGTH", "IRI", "RUTTING", "DATE", "ERROR_DESCRIPTION"]]
statsReport = [["DISTRICT_FILE", "LG Record Count", "KG Record Count", "Total Records Count", "Input Record Count", "Lost Records Count", "LG Records Length", "KG Records Length", "Total Routed Length"]]

arcpy.CreateFileGDB_management(output, "RhinoLines.gdb")
rhinospace = output + os.sep + "RhinoLines.gdb"
rhino_lines = rhinospace + os.sep + "rhinolines"
arcpy.Copy_management(calRhino, rhino_lines)
# arcpy.AddField_management(rhino_lines, "FRM_DFO", "DOUBLE")
# arcpy.AddField_management(rhino_lines, "TO_DFO", "DOUBLE")
cursor = arcpy.da.UpdateCursor(rhino_lines, ["FRM_DFO", "TO_DFO", 'SHAPE@'])
for row in cursor:
    bp = row[2].firstPoint.M
    ep = row[2].lastPoint.M
    bpNew = float(format(float(bp), '.3f'))
    epNew = float(format(float(ep), '.3f'))
    row[0] = bpNew
    row[1] = epNew
    cursor.updateRow(row)
del cursor
del row
arcpy.AddMessage("Calibrated RHINO copied local.")
arcpy.AddField_management(rhino_lines, "RTE_ORDER", "SHORT")
arcpy.AddField_management(rhino_lines, "FLAG", "TEXT", "", "", 30)
arcpy.AddMessage("Applying RTE_ORDER.")
cursor = arcpy.da.UpdateCursor(rhino_lines, ["RTE_ID", "FRM_DFO", "RTE_ORDER", "FLAG", "RU", "F_SYSTEM", "SEC_NHS", "HPMS"], "", "", "", (None, "ORDER BY RTE_ID ASC, FRM_DFO ASC"))
counter = 0
order = 1
previous = ""
for row in cursor:
    current = row[0]
    if counter == 0:
        row[2] = order
    elif counter != 0 and previous == current:
        order += 1
        row[2] = order
    else:
        order = 1
        row[2] = order
    previous = current
    counter += 1
    ru = int(row[4])
    fs = int(row[5])
    nhs = int(row[6])
    row[3] = current + "-" + str(order) + "-" + str(ru) + "-" + str(fs) + "-" + str(nhs) + "-" + str(row[7])
    cursor.updateRow(row)
del cursor
arcpy.AddMessage("RTE_ORDER applied.")
dictionary = {}
cursor = arcpy.da.SearchCursor(rhino_lines, ["FLAG", "FRM_DFO", "TO_DFO"])
for row in cursor:
    flag = row[0]
    odr = flag.split("-")[0] + "-" + flag.split("-")[1] + "-" + flag.split("-")[2]
    fDFO = row[1]
    tDFO = row[2]
    dictionary[odr] = [fDFO, tDFO]
del cursor
for excel in inputlist:
    distName = str(excel).split("\\")[-1]
    if distName[-1] == "$":
        distName = distName[:-1]
    arcpy.AddMessage("Beginning " + str(inputcntr) + " of " + str(lengthinput) + ": " + distName)
    arcpy.CreateFileGDB_management(output, "Wrkg" + str(inputcntr) + ".gdb")
    workspace = output + os.sep + "Wrkg" + str(inputcntr) + ".gdb"
    arcpy.AddMessage("Working database created.")
    data = []
    fields = ["ROUTE_ID", "BEGIN_POINT", "END_POINT", "SECTION_LENGTH", "IRI", "RUTTING", "DATE", "RU", "F_SYSTEM", "SEC_NHS", "HPMS"]
    data.append(fields)
    # spref = "Coordinate Systems\\Geographic Coordinate Systems\\World\\GCS_WGS_1984.prj"
    # spref = "Coordinate Systems\\Geographic Coordinate Systems\\World\\WGS 1984.prj"
    # arcpy.MakeXYEventLayer_management(excel, "Long", "Lat", "pointEvents" + str(inputcntr), spref)
    # arcpy.AddMessage("Event Layer created.")
    pntfeature = workspace + os.sep + "allPoints"
    arcpy.CopyFeatures_management(excel, pntfeature)
    arcpy.AddMessage("Point feature class created.")
    arcpy.AddField_management(pntfeature, "RTE_ID_Orig", "TEXT", "", "", 30)
    initial = 0
    ids = []
    cursor = arcpy.da.UpdateCursor(pntfeature, ["ROUTE_ID", "ROUTE_ID_Good", "RTE_ID_Orig"])
    for row in cursor:
        id = row[0]
        id2 = row[1]
        initial += 1
        if id2 not in ids:
            ids.append(id2)
        row[0] = id2
        row[2] = id
        cursor.updateRow(row)
    del cursor
    del row
    arcpy.AddMessage("RTE_IDs compiled.")
    roadslayer = ""
    pointslayer = ""
    # mxd = arcpy.mapping.MapDocument(theMXD)
    mxd = arcpy.mapping.MapDocument("CURRENT")
    df = arcpy.mapping.ListDataFrames(mxd, "*")[0]
    for lyr in arcpy.mapping.ListLayers(mxd):
        if lyr.name == "rhinolines":
            arcpy.mapping.RemoveLayer(df, lyr)
        if lyr.name == "allPoints":
            arcpy.mapping.RemoveLayer(df, lyr)
    newlayerpnt = arcpy.mapping.Layer(pntfeature)
    arcpy.mapping.AddLayer(df, newlayerpnt)
    newlayerline = arcpy.mapping.Layer(rhino_lines)
    arcpy.mapping.AddLayer(df, newlayerline)
    for lyr in arcpy.mapping.ListLayers(mxd):
        if lyr.name == "rhinolines":
            roadslayer = lyr
        if lyr.name == "allPoints":
            pointslayer = lyr
    arcpy.AddMessage("Layers acquired.")
    counter = 1
    total = len(ids)
    arcpy.AddMessage("Finding measures for: ")
    for id in ids:
        roadslayer.definitionQuery = " RTE_ID = '" + id + "' "
        pointslayer.definitionQuery = " ROUTE_ID = '" + id + "' "
        arcpy.RefreshActiveView()
        arcpy.AddMessage(str(counter) + "/" + str(total) + " " + id)
        label = id.replace("-", "")
        arcpy.LocateFeaturesAlongRoutes_lr(pointslayer, roadslayer, "FLAG", "230 Feet", workspace + os.sep + label, "FLAG POINT END_POINT")
        counter += 1
    arcpy.AddMessage("Tables created.")
    # alltables = []
    arcpy.env.workspace = workspace
    tables = arcpy.ListTables()
    for table in tables:
        arcpy.AddMessage(table)
        arcpy.AddField_management(table, "ODR_FLAG", "TEXT", "", "", 20)
        arcpy.AddMessage("Order Flag field created.")
        numbDict = {}
        cursor = arcpy.da.UpdateCursor(table, ["FLAG", "ODR_FLAG"])
        for row in cursor:
            flag = row[0]
            odr = flag.split("-")[0] + "-" + flag.split("-")[1] + "-" + flag.split("-")[2]
            if odr not in numbDict.keys():
                numbDict[odr] = 1
            else:
                curNumb = numbDict[odr]
                curNumb += 1
                numbDict[odr] = curNumb
            row[1] = odr
            cursor.updateRow(row)
        del cursor
        counter = 1
        previous = ""
        last = ""
        cursor = arcpy.da.UpdateCursor(table, ["ODR_FLAG", "BEGIN_POINT", "END_POINT", "SECTION_LENGTH"], None, None, False, (None, "ORDER BY ODR_FLAG ASC, END_POINT ASC"))
        for row in cursor:
            current = row[0]
            total = numbDict[current]
            if counter == 1 and counter != total:
                values = dictionary[current]
                beginner = float(format(float(values[0]), '.3f'))
                segEnd = float(format(float(row[2]), '.3f'))
                if abs(segEnd - beginner) > 1:
                    segSrt = segEnd - .1
                    row[1] = float(format(float(segSrt), '.3f'))
                    row[2] = segEnd
                    row[3] = row[2] - row[1]
                else:
                    row[1] = beginner
                    row[2] = segEnd
                    row[3] = row[2] - row[1]
            elif counter == 1 and counter == total:
                values = dictionary[current]
                row[1] = float(format(float(values[0]), '.3f'))
                row[2] = float(format(float(values[1]), '.3f'))
                row[3] = row[2] - row[1]
                counter = 0
            elif previous == current and counter != total:
                row[1] = last
                row[2] = float(format(float(row[2]), '.3f'))
                row[3] = row[2] - last
            elif previous == current and counter == total:
                values = dictionary[current]
                ender = float(format(float(values[1]), '.3f'))
                if abs(ender - last) > 1:
                    row[1] = last
                    row[2] = float(format(float(row
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/tests/test_client.py
Python
lgpl-3.0
16,414
0.007006
"""Tests for parallel client.py Authors: * Min RK """ #------------------------------------------------------------------------------- # Copyright (C) 2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Imports #------------------------------------------------------------------------------- from __future__ import division import time from datetime import datetime from tempfile import mktemp import zmq from IPython import parallel from IPython.parallel.client import client as clientmod from IPython.parallel import error fro
m IPython.parallel import AsyncResult, AsyncHubResult from IPython.parallel import LoadBalancedView, DirectView from clienttest import ClusterTestCase, segfault, wait, add_engines def setup(): add_engines(4, total=True) class TestClient(ClusterTestCase): def test_ids(self): n = len(self.client.ids) self.add_engines(2) self.assertEquals(len(self.client.ids), n+2) def test_
view_indexing(self): """test index access for views""" self.minimum_engines(4) targets = self.client._build_targets('all')[-1] v = self.client[:] self.assertEquals(v.targets, targets) t = self.client.ids[2] v = self.client[t] self.assert_(isinstance(v, DirectView)) self.assertEquals(v.targets, t) t = self.client.ids[2:4] v = self.client[t] self.assert_(isinstance(v, DirectView)) self.assertEquals(v.targets, t) v = self.client[::2] self.assert_(isinstance(v, DirectView)) self.assertEquals(v.targets, targets[::2]) v = self.client[1::3] self.assert_(isinstance(v, DirectView)) self.assertEquals(v.targets, targets[1::3]) v = self.client[:-3] self.assert_(isinstance(v, DirectView)) self.assertEquals(v.targets, targets[:-3]) v = self.client[-1] self.assert_(isinstance(v, DirectView)) self.assertEquals(v.targets, targets[-1]) self.assertRaises(TypeError, lambda : self.client[None]) def test_lbview_targets(self): """test load_balanced_view targets""" v = self.client.load_balanced_view() self.assertEquals(v.targets, None) v = self.client.load_balanced_view(-1) self.assertEquals(v.targets, [self.client.ids[-1]]) v = self.client.load_balanced_view('all') self.assertEquals(v.targets, None) def test_dview_targets(self): """test direct_view targets""" v = self.client.direct_view() self.assertEquals(v.targets, 'all') v = self.client.direct_view('all') self.assertEquals(v.targets, 'all') v = self.client.direct_view(-1) self.assertEquals(v.targets, self.client.ids[-1]) def test_lazy_all_targets(self): """test lazy evaluation of rc.direct_view('all')""" v = self.client.direct_view() self.assertEquals(v.targets, 'all') def double(x): return x*2 seq = range(100) ref = [ double(x) for x in seq ] # add some engines, which should be used self.add_engines(1) n1 = len(self.client.ids) # simple apply r = v.apply_sync(lambda : 1) self.assertEquals(r, [1] * n1) # map goes through remotefunction r = v.map_sync(double, seq) self.assertEquals(r, ref) # add a couple more engines, and try again self.add_engines(2) n2 = len(self.client.ids) self.assertNotEquals(n2, n1) # apply r = v.apply_sync(lambda : 1) self.assertEquals(r, [1] * n2) # map r = v.map_sync(double, seq) self.assertEquals(r, ref) def test_targets(self): """test various valid targets arguments""" build = self.client._build_targets ids = self.client.ids idents,targets = build(None) self.assertEquals(ids, targets) def test_clear(self): """test clear behavior""" self.minimum_engines(2) v = self.client[:] v.block=True v.push(dict(a=5)) v.pull('a') id0 = self.client.ids[-1] self.client.clear(targets=id0, block=True) a = self.client[:-1].get('a') self.assertRaisesRemote(NameError, self.client[id0].get, 'a') self.client.clear(block=True) for i in self.client.ids: self.assertRaisesRemote(NameError, self.client[i].get, 'a') def test_get_result(self): """test getting results from the Hub.""" c = clientmod.Client(profile='iptest') t = c.ids[-1] ar = c[t].apply_async(wait, 1) # give the monitor time to notice the message time.sleep(.25) ahr = self.client.get_result(ar.msg_ids) self.assertTrue(isinstance(ahr, AsyncHubResult)) self.assertEquals(ahr.get(), ar.get()) ar2 = self.client.get_result(ar.msg_ids) self.assertFalse(isinstance(ar2, AsyncHubResult)) c.close() def test_get_execute_result(self): """test getting execute results from the Hub.""" c = clientmod.Client(profile='iptest') t = c.ids[-1] cell = '\n'.join([ 'import time', 'time.sleep(0.25)', '5' ]) ar = c[t].execute("import time; time.sleep(1)", silent=False) # give the monitor time to notice the message 
time.sleep(.25) ahr = self.client.get_result(ar.msg_ids) self.assertTrue(isinstance(ahr, AsyncHubResult)) self.assertEquals(ahr.get().pyout, ar.get().pyout) ar2 = self.client.get_result(ar.msg_ids) self.assertFalse(isinstance(ar2, AsyncHubResult)) c.close() def test_ids_list(self): """test client.ids""" ids = self.client.ids self.assertEquals(ids, self.client._ids) self.assertFalse(ids is self.client._ids) ids.remove(ids[-1]) self.assertNotEquals(ids, self.client._ids) def test_queue_status(self): ids = self.client.ids id0 = ids[0] qs = self.client.queue_status(targets=id0) self.assertTrue(isinstance(qs, dict)) self.assertEquals(sorted(qs.keys()), ['completed', 'queue', 'tasks']) allqs = self.client.queue_status() self.assertTrue(isinstance(allqs, dict)) intkeys = list(allqs.keys()) intkeys.remove('unassigned') self.assertEquals(sorted(intkeys), sorted(self.client.ids)) unassigned = allqs.pop('unassigned') for eid,qs in allqs.items(): self.assertTrue(isinstance(qs, dict)) self.assertEquals(sorted(qs.keys()), ['completed', 'queue', 'tasks']) def test_shutdown(self): ids = self.client.ids id0 = ids[0] self.client.shutdown(id0, block=True) while id0 in self.client.ids: time.sleep(0.1) self.client.spin() self.assertRaises(IndexError, lambda : self.client[id0]) def test_result_status(self): pass # to be written def test_db_query_dt(self): """test db query by date""" hist = self.client.hub_history() middle = self.client.db_query({'msg_id' : hist[len(hist)//2]})[0] tic = middle['submitted'] before = self.client.db_query({'submitted' : {'$lt' : tic}}) after = self.client.db_query({'submitted' : {'$gte' : tic}}) self.assertEquals(len(before)+len(after),len(hist)) for b in before: self.assertTrue(b['submitted'] < tic) for a in after: self.assertTrue(a['submitted'] >= tic) same = self.client.db_query({'submitted' : tic}) for s in same: self.assertTrue(s['submitted'] == tic) def test_db_query_k
rtfd/readthedocs.org
readthedocs/projects/migrations/0065_add_feature_future_default_true.py
Python
mit
594
0.001684
# Generated by Django 2.2.16 on 2020-10-01 18:00
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('projects', '0064_add_feature_future_default_true'),
    ]

    operations = [
        migrations.AlterField(
            model_name='project',
            name='privacy_level',
            field=models.CharField(choices=[('public', 'Public'), ('protected', 'Protected'), ('private', 'Private')], default='public', help_text='Should the project dashboard be public?', max_length=20, verbose_name='Privacy Level'),
        ),
    ]
socialwifi/jsonapi-requests
setup.py
Python
bsd-3-clause
1,500
0.001333
try:
    from pip._internal.req import parse_requirements
except ImportError:
    from pip.req import parse_requirements

from setuptools import find_packages
from setuptools import setup


def get_long_description():
    with open('README.md') as readme_file:
        return readme_file.read()


setup(
    name='jsonapi-requests',
    version='0.6.2.dev0',
    description='Python client implementation for json api. http://jsonapi.org/',
    long_description=get_long_description(),
    long_description_content_type='text/markdown',
    author='Social WiFi',
    author_email='it@socialwifi.com',
    url='https://github.com/socialwifi/jsonapi-requests',
    packages=find_packages(exclude=['tests']),
    install_requires=[str(ir.req) for ir in parse_requirements('base_requirements.txt', session=False)],
    setup_requires=['pytest-runner'],
    tests_require=['pytest', 'flask'],
    extras_require={
        'flask': ['flask']
    },
    license='BSD',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ]
)
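
The try/except around parse_requirements above leans on pip internals (pip._internal is explicitly not a public API), so this setup.py breaks whenever pip reshuffles its modules. A minimal sketch of the usual pip-free workaround, assuming base_requirements.txt lists one requirement per line (the helper name is made up):

def read_base_requirements(path='base_requirements.txt'):
    # Plain-text parse: one requirement per line, skipping comments and blanks.
    with open(path) as requirements_file:
        lines = (line.strip() for line in requirements_file)
        return [line for line in lines if line and not line.startswith('#')]

# install_requires=read_base_requirements() would then replace the pip-based list.
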
Situphen/Python-ZMarkdown
markdown/extensions/video.py
Python
bsd-3-clause
6,325
0.002213
#!/usr/bin/env python

import markdown
from markdown.util import etree
from markdown.blockprocessors import BlockProcessor
import re


class VideoExtension(markdown.Extension):
    def __init__(self, js_support=False, **kwargs):
        markdown.Extension.__init__(self)
        self.config = {
            'dailymotion_width': ['480', 'Width for Dailymotion videos'],
            'dailymotion_height': ['270', 'Height for Dailymotion videos'],
            'vimeo_width': ['500', 'Width for Vimeo videos'],
            'vimeo_height': ['281', 'Height for Vimeo videos'],
            'yahoo_width': ['624', 'Width for Yahoo! videos'],
            'yahoo_height': ['351', 'Height for Yahoo! videos'],
            'youtube_width': ['560', 'Width for Youtube videos'],
            'youtube_height': ['315', 'Height for Youtube videos'],
            'ina_width': ['620', 'Width for INA videos'],
            'ina_height': ['349', 'Height for INA videos'],
            'jsfiddle': [False, ''],
            'jsfiddle_width': ['560', 'Width for jsfiddle'],
            'jsfiddle_height': ['560', 'Height for jsfiddle'],
        }
        self.config['youtube_short_width'] = self.config['youtube_width']
        self.config['youtube_short_height'] = self.config['youtube_height']

        # Override defaults with user settings
        for key, value in kwargs.items():
            self.setConfig(key, value)
        if js_support:
            self.setConfig("jsfiddle", True)

    def add_inline(self, md, name, klass, pat):
        RE = r'(^|\n)!\(' + pat + r'\)'
        md.parser.blockprocessors.add("video-" + name,
                                      klass(md, RE,
                                            self.config["{}_width".format(name)][0],
                                            self.config["{}_height".format(name)][0]),
                                      "_begin")

    def extendMarkdown(self, md, md_globals):
        self.add_inline(md, 'dailymotion', Dailymotion,
                        r'https?://www\.dailymotion\.com/video/(?P<dailymotionid>[a-z0-9]+)(_[\w\-]*)?')
        self.add_inline(md, 'vimeo', Vimeo,
                        r'https?://(www.|)vimeo\.com/(?P<vimeoid>\d+)\S*')
        self.add_inline(md, 'yahoo', Yahoo,
                        r'https?://screen\.yahoo\.com/.+/?')
        self.add_inline(md, 'youtube', Youtube,
                        r'https?://(www\.)?youtube\.com/watch\?\S*v=(?P<youtubeid>\S[^&/]+)'
                        r'(?P<channel>&ab_channel=[\w%]+)?')
        self.add_inline(md, 'youtube_short', Youtube,
                        r'https?://youtu\.be/(?P<youtubeid>\S[^?&/]+)?')
        self.add_inline(md, 'ina', Ina,
                        r'https?://www\.ina\.fr/video/(?P<inaid>[A-Z0-9]+)/([\w\-]*)\.html')
        if self.config["jsfiddle"][0]:
            self.add_inline(md, 'jsfiddle', JsFiddle,
                            r'https?://(www.|)jsfiddle\.net(/(?P<jsfiddleuser>\w+))?/'
                            r'(?P<jsfiddleid>\w+)(/(?P<jsfiddlerev>[0-9]+)|)/?')


class VideoBProcessor(BlockProcessor):
    def __init__(self, md, patt, width, height):
        BlockProcessor.__init__(self, md.parser)
        self.md = md
        self.width = width
        self.height = height
        self.RE = re.compile(patt)

    def test(self, parent, block):
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        m = self.RE.search(blocks[0])
        el = self.handle_match(m)

        if el is None:
            return False

        block = blocks.pop(0)
        before = block[:m.start()]
        after = block[m.end():]

        if before:  # pragma: no cover
            # This should never occur because regex require that the expression is starting the block.
            # Do not raise an exception because exception should never be generated.
            self.md.parser.parseBlocks(parent, [before])

        parent.append(el)

        if after:
            blocks.insert(0, after)

    @staticmethod
    def extract_url(m):  # pragma: no cover
        # Should be overridden in sub-class
        return ""

    def handle_match(self, m):
        url = self.extract_url(m)
        if url is None:
            return None
        return self.render_iframe(url, self.width, self.height)

    @staticmethod
    def render_iframe(url, width, height):
        iframe = etree.Element('iframe')
        iframe.set('width', width)
        iframe.set('height', height)
        iframe.set('src', url)
        iframe.set('allowfullscreen', 'true')
        iframe.set('frameborder', '0')
        return iframe


class Dailymotion(VideoBProcessor):
    @staticmethod
    def extract_url(m):
        return 'https://www.dailymotion.com/embed/video/%s' % m.group('dailymotionid')


class Vimeo(VideoBProcessor):
    @staticmethod
    def extract_url(m):
        return 'https://player.vimeo.com/video/%s' % m.group('vimeoid')


class Yahoo(VideoBProcessor):
    @staticmethod
    def extract_url(m):
        return m.string + '?format=embed&player_autoplay=false'


class Youtube(VideoBProcessor):
    @staticmethod
    def extract_url(m):
        return 'https://www.youtube.com/embed/%s' % m.group('youtubeid')


class Ina(VideoBProcessor):
    @staticmethod
    def extract_url(m):
        return 'http://player.ina.fr/player/embed/%s/1/1b0bd203fbcd702f9bc9b10ac3d0fc21/560/315/1/148db8' % m.group(
            'inaid')


class JsFiddle(VideoBProcessor):
    @staticmethod
    def extract_url(m):
        fields = (m.group('jsfiddleuser'), m.group('jsfiddleid'), m.group('jsfiddlerev'))

        if fields[0] is not None and fields[2] is None:
            # Only two part, revision could be in id pattern
            try:
                int(fields[1])
                # It is a revision !
                fields = (None, fields[0], fields[1])
            except ValueError:
                pass

        if fields[0] is not None and fields[1] is not None and fields[2] is None:
            # Base version link, should not be allowed because content can be changed externally
            return None

        base = "https://jsfiddle.net/{}/embedded/result,js,html,css/"
        return base.format("/".join([t for t in fields if t is not None]))


def makeExtension(*args, **kwargs):
    return VideoExtension(*args, **kwargs)
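
A quick sketch of wiring the extension above into python-markdown, assuming this fork is importable under the path shown in the record; the !(url) block syntax comes from the RE assembled in add_inline:

import markdown
from markdown.extensions.video import VideoExtension

text = "Some paragraph.\n\n!(https://vimeo.com/123456)"
# The video block is swapped for an <iframe> sized from the extension config.
html = markdown.markdown(text, extensions=[VideoExtension(vimeo_width='640')])
print(html)
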
mscansian/SigmaWebPlus
plus/kivyapp.py
Python
gpl-3.0
1,840
0.014706
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
kivyapp.py

This file defines the KivyApp class, which derives from Kivy's
kivy.app.App. The class is required to bootstrap an application with
Kivy. After instantiating the class, you must set a parent class
(through KivyApp.parent) to receive a callback for every function
that is called on this class.

Public methods
    (Several, but there is no need to call them directly)

Dependencies (within the project)

'''
from kivy import Config
from kivy.app import App
from kivy.clock import Clock


class KivyApp(App):
    parent = None

    def build(self):
        if self.parent == None:
            raise KivyAppException("Variable parent not defined in KivyApp")
        Config.set('kivy', 'exit_on_escape', 0)
        Config.set('kivy', 'log_enable', 0)
        Clock.schedule_interval(self.on_update, 0)  # Schedule main update

    def on_start(self):
        return self.parent.on_start()

    def on_stop(self):
        return self.parent.on_stop()

    def on_pause(self):
        return self.parent.on_pause()

    def on_resume(self):
        return self.parent.on_resume()

    def on_update(self, *args):
        return self.parent.on_update()

    def build_settings(self, settings):
        self.parent.build_settings(settings)

    def build_config(self, config):
        self.parent.build_config(config)

    def on_config_change(self, config, section, key, value):
        self.parent.on_config_change(config, section, key, value)

    def on_event(self, *args):
        return self.parent.on_event(*args)


class KivyAppException(Exception):
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
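
A minimal sketch of the parent contract KivyApp expects; ParentShim is a hypothetical stand-in, and only the callbacks hit at startup are stubbed:

class ParentShim(object):
    def on_start(self):
        print('started')

    def on_update(self):
        # Called once per frame via Clock.schedule_interval in build().
        pass

    # on_stop, on_pause, on_resume, build_settings, build_config,
    # on_config_change and on_event follow the same delegation pattern.

app = KivyApp()
app.parent = ParentShim()
# app.run() would now boot Kivy and route lifecycle events to the shim.
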
dhalperi/incubator-beam
sdks/python/apache_beam/examples/complete/estimate_pi.py
Python
apache-2.0
4,482
0.006475
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""A workflow that uses a simple Monte Carlo method to estimate π.

The algorithm computes the fraction of points drawn uniformly within the unit
square that also fall in the quadrant of the unit circle that overlaps the
square. A simple area calculation shows that this fraction should be π/4, so
we multiply our counts ratio by four to estimate π.
"""

from __future__ import absolute_import

import argparse
import json
import logging
import random

import apache_beam as beam
from apache_beam.io import WriteToText
from apache_beam.typehints import Any
from apache_beam.typehints import Iterable
from apache_beam.typehints import Tuple
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions


@beam.typehints.with_output_types(Tuple[int, int, int])
@beam.typehints.with_input_types(int)
def run_trials(runs):
  """Run trials and return a 3-tuple representing the results.

  Args:
    runs: Number of trial runs to be executed.

  Returns:
    A 3-tuple (total trials, inside trials, 0).

  The final zero is needed solely to make sure that the combine_results
  function has same type for inputs and outputs (a requirement for combiner
  functions).
  """
  inside_runs = 0
  for _ in xrange(runs):
    x = random.uniform(0, 1)
    y = random.uniform(0, 1)
    inside_runs += 1 if x * x + y * y <= 1.0 else 0
  return runs, inside_runs, 0


@beam.typehints.with_output_types(Tuple[int, int, float])
@beam.typehints.with_input_types(Iterable[Tuple[int, int, Any]])
def combine_results(results):
  """Combiner function to sum up trials and compute the estimate.

  Args:
    results: An iterable of 3-tuples (total trials, inside trials, ignored).

  Returns:
    A 3-tuple containing the sum of total trials, sum of inside trials, and
    the probability computed from the two numbers.
  """
  # TODO(silviuc): Do we guarantee that argument can be iterated repeatedly?
  # Should document one way or the other.
  total, inside = sum(r[0] for r in results), sum(r[1] for r in results)
  return total, inside, 4 * float(inside) / total


class JsonCoder(object):
  """A JSON coder used to format the final result."""

  def encode(self, x):
    return json.dumps(x)


class EstimatePiTransform(beam.PTransform):
  """Runs 10M trials, and combine the results to estimate pi."""

  def __init__(self, tries_per_work_item=100000):
    self.tries_per_work_item = tries_per_work_item

  def expand(self, pcoll):
    # A hundred work items of a hundred thousand tries each.
    return (pcoll
            | 'Initialize' >> beam.Create(
                [self.tries_per_work_item] * 100).with_output_types(int)
            | 'Run trials' >> beam.Map(run_trials)
            | 'Sum' >> beam.CombineGlobally(combine_results).without_defaults())


def run(argv=None):
  parser = argparse.ArgumentParser()
  parser.add_argument('--output',
                      required=True,
                      help='Output file to write results to.')
  known_args, pipeline_args = parser.parse_known_args(argv)
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  pipeline_options = PipelineOptions(pipeline_args)
  pipeline_options.view_as(SetupOptions).save_main_session = True
  p = beam.Pipeline(options=pipeline_options)

  (p  # pylint: disable=expression-not-assigned
   | EstimatePiTransform()
   | WriteToText(known_args.output, coder=JsonCoder()))

  # Actually run the pipeline (all operations above are deferred).
  p.run()


if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  run()
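
The combiner arithmetic is easy to sanity-check without Beam, since the estimate is just 4 * inside / total; a self-contained sketch of the same Monte Carlo step:

import random

def estimate_pi(trials=100000):
    # Count draws from the unit square that land inside the quarter circle.
    inside = sum(1 for _ in range(trials)
                 if random.uniform(0, 1) ** 2 + random.uniform(0, 1) ** 2 <= 1.0)
    return 4.0 * inside / trials

print(estimate_pi())  # hovers around 3.14, tightening as trials grows
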
lmazuel/azure-sdk-for-python
azure-keyvault/azure/keyvault/version.py
Python
mit
493
0.002028
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

VERSION = "0.3.7"
caperren/Archives
OSU Coursework/ROB 456 - Intelligent Robotics/Homework 0 - Robotics Probabilities Examples/HW0.py
Python
gpl-3.0
5,437
0.003862
import numpy
import scipy
import matplotlib.pyplot as pyplot
import time

numpy.random.seed(int(time.time()))

PART_1_COEFFICIENTS = numpy.array([-0.1, 4.0, -0.1, 10.0], float)
PART_1_X_LIMITS = [-10.0, 25.0]


def plot_form(axis_handle, x_limit=None, title="", x_label="x", y_label="f(x)"):
    if x_limit is not None:
        axis_handle.set_xlim(x_limit)
    axis_handle.set_title(title)
    axis_handle.set_xlabel(x_label)
    axis_handle.set_ylabel(y_label)


def part_1_polynomial(x_input):
    return numpy.polyval(PART_1_COEFFICIENTS, x_input)


def part_2_plot():
    x_limit_min = PART_1_X_LIMITS[0]
    x_limit_max = PART_1_X_LIMITS[1]
    temp = numpy.linspace(x_limit_min, x_limit_max, 351, dtype=float)
    function_handle_1, axis_handle_1 = pyplot.subplots()
    axis_handle_1.plot(temp, part_1_polynomial(temp), "b-")
    plot_form(axis_handle_1, PART_1_X_LIMITS, "Original Polynomial")
    function_handle_1.savefig("figures/hw0_original_polynomial.pdf", bbox_inches="tight")


def part_3():
    x_limit_min = PART_1_X_LIMITS[0]
    x_limit_max = PART_1_X_LIMITS[1]
    bin_width = (x_limit_max - x_limit_min) / 14.0
    x_bin = numpy.arange(x_limit_min, x_limit_max, bin_width, float)
    y_bin = part_1_polynomial(x_bin)
    function_handle, axis_handle = pyplot.subplots()
    axis_handle.bar(x_bin + bin_width/2.0, y_bin, width=bin_width, edgecolor="k")
    plot_form(axis_handle, PART_1_X_LIMITS, "Discretized Bins")
    function_handle.savefig("figures/hw0_discretized_bins.pdf", bbox_inches="tight")


def part_4():
    x_limit_min = PART_1_X_LIMITS[0]
    x_limit_max = PART_1_X_LIMITS[1]
    bin_width = (x_limit_max - x_limit_min) / 14.0
    x_bin = numpy.arange(x_limit_min, x_limit_max, bin_width, float)
    y_bin = part_1_polynomial(x_bin)
    y_bin_normalized = y_bin / y_bin.sum()
    function_handle, axis_handle = pyplot.subplots()
    axis_handle.bar(x_bin + bin_width / 2.0, y_bin_normalized, width=bin_width, edgecolor="k")
    plot_form(axis_handle, PART_1_X_LIMITS,
              "Discretized Bins (Normalized) sum=%s" % y_bin_normalized.sum(), y_label="p(k)")
    function_handle.savefig("figures/hw0_discretized_bins_normalized.pdf", bbox_inches="tight")


def part_5_1():
    num_samples = 500
    x_rand_values = numpy.arange(1, num_samples+1, 1, int)
    y_rand_values = numpy.random.random(num_samples)
    function_handle, axis_handle = pyplot.subplots()
    pyplot.plot(x_rand_values, y_rand_values, "k+")
    plot_form(axis_handle, x_limit=[1, num_samples],
              title="%s Samples, Uniformly Distributed" % num_samples)
    function_handle.savefig("figures/hw0_%s_random_samples.pdf" % num_samples, bbox_inches="tight")
    return (x_rand_values, y_rand_values)


def part_5_2(vals):
    num_samples = 500
    x_limit_min = PART_1_X_LIMITS[0]
    x_limit_max = PART_1_X_LIMITS[1]
    bin_width = (x_limit_max - x_limit_min) / 14.0
    x_bin = numpy.arange(x_limit_min, x_limit_max, bin_width, float)
    x_rand_values = vals[0]
    y_rand_values = vals[1]
    # Scale the uniform [0, 1) samples onto [x_limit_min, x_limit_max); the
    # original parenthesization multiplied by (range + min) instead.
    y_random_scaled = y_rand_values * (x_limit_max - x_limit_min) + x_limit_min
    function_handle, axis_handle = pyplot.subplots()
    pyplot.plot(x_rand_values, y_random_scaled, "k+")
    for i in range(0, len(x_bin)):
        # Draw a horizontal line at each bin edge (the original always drew
        # the same sloped line between the first two edges).
        axis_handle.plot([1, num_samples], [x_bin[i], x_bin[i]])
    plot_form(axis_handle, [1, num_samples], "Random Samples Mapped to X Ranges Of Bins")
    function_handle.savefig("figures/hw0_random_bins_to_ranges.pdf", bbox_inches="tight")


def part_5_3(vals):
    # Rebuild the bins and scaled samples locally (mirrors part_5_2); the
    # original body referenced names that were never defined in this scope.
    x_limit_min = PART_1_X_LIMITS[0]
    x_limit_max = PART_1_X_LIMITS[1]
    bin_width = (x_limit_max - x_limit_min) / 14.0
    x_bin = numpy.arange(x_limit_min, x_limit_max, bin_width, float)
    y_rand_scaled = vals[1] * (x_limit_max - x_limit_min) + x_limit_min

    y_count_incorrect = numpy.zeros(x_bin.shape)
    for i in range(0, len(y_rand_scaled)):
        for j in range(len(x_bin), 0, -1):
            if y_rand_scaled[i] > x_bin[j-1]:
                y_count_incorrect[j-1] += 1
                break
    function_handle, axis_handle = pyplot.subplots()
    pyplot.plot(x_bin + bin_width/2.0, y_count_incorrect, "k+")
    plot_form(axis_handle, PART_1_X_LIMITS, "Samples per bin (incorrect)", y_label="samples")
    function_handle.savefig("hw0_samples_per_bin_incorrect.pdf", bbox_inches="tight")


def part_5_4(vals):
    # Rebuild the names this function depended on (bins, normalized heights,
    # raw samples); the original referenced variables from other parts.
    num_samples = 500
    x_limit_min = PART_1_X_LIMITS[0]
    x_limit_max = PART_1_X_LIMITS[1]
    bin_width = (x_limit_max - x_limit_min) / 14.0
    x_bin = numpy.arange(x_limit_min, x_limit_max, bin_width, float)
    y_bin = part_1_polynomial(x_bin)
    y_bin_normalized = y_bin / y_bin.sum()
    x_rand = vals[0]
    y_rand = vals[1]

    y_bin_cdf = y_bin_normalized.copy()
    i = 0
    while i < len(y_bin_cdf) - 1:
        i += 1
        y_bin_cdf[i] += y_bin_cdf[i-1]

    function_handle, axis_handle = pyplot.subplots()
    axis_handle.plot(x_rand, y_rand, "k+")
    for i in range(0, len(y_bin_cdf)):
        # Horizontal line at each cumulative level (the original always drew
        # the same sloped line between the first two levels).
        axis_handle.plot([1, num_samples], [y_bin_cdf[i], y_bin_cdf[i]])
    axis_handle.set_title("Dividing up the samples according to bin height")
    function_handle.savefig("hw0_correct_sample_division.pdf", bbox_inches="tight")

    y_count_correct = numpy.zeros(x_bin.shape)
    for i in range(0, len(y_rand)):
        for j in range(len(y_bin_cdf)):
            if y_rand[i] < y_bin_cdf[j]:
                y_count_correct[j] += 1
                break
    function_handle_1, axis_handle_1 = pyplot.subplots()
    axis_handle_1.bar(x_bin + bin_width/2.0, y_count_correct, width=bin_width, edgecolor="k")
    plot_form(axis_handle_1, PART_1_X_LIMITS, "Samples per bin (correct)", y_label="samples")
    function_handle_1.savefig("hw0_samples_per_bin_correct.pdf", bbox_inches="tight")


def real_part_2():
    pass


if __name__ == '__main__':
    # part_2_plot()
    # part_3()
    # part_4()
    # vals = part_5_1()
    # part_5_2(vals)
    real_part_2()
    pyplot.show()
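
part_5_4's cumulative-sum loop is inverse-CDF sampling; a short vectorized cross-check of the same idea with numpy (the function name is mine):

import numpy

def sample_bins(probabilities, num_samples=500):
    # A uniform draw lands in bin j when it falls below the j-th cumulative sum.
    cdf = numpy.cumsum(probabilities)
    draws = numpy.random.random(num_samples)
    return numpy.searchsorted(cdf, draws)

# Per-bin counts then follow from:
# numpy.bincount(sample_bins(p), minlength=len(p))
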
JulienMcJay/eclock
windows/Python27/Lib/site-packages/pywin32-218-py2.7-win32.egg/test/testall.py
Python
gpl-2.0
5,957
0.004029
import sys, os
import re
import unittest
import traceback

import pywin32_testutil

# A list of demos that depend on user-interface of *any* kind.  Tests listed
# here are not suitable for unattended testing.
ui_demos = """GetSaveFileName print_desktop win32cred_demo win32gui_demo
win32gui_dialog win32gui_menu win32gui_taskbar
win32rcparser_demo winprocess win32console_demo
win32gui_devicenotify
NetValidatePasswordPolicy""".split()
# Other demos known as 'bad' (or at least highly unlikely to work)
# cerapi: no CE module is built (CE via pywin32 appears dead)
# desktopmanager: hangs (well, hangs for 60secs or so...)
bad_demos = "cerapi desktopmanager win32comport_demo".split()

argvs = {
    "rastest": ("-l",),
}

# re to pull apart an exception line into the exception type and the args.
re_exception = re.compile("([a-zA-Z0-9_.]*): (.*)$")

def find_exception_in_output(data):
    have_traceback = False
    for line in data.splitlines():
        line = line.decode('ascii') # not sure what the correct encoding is...
        if line.startswith("Traceback ("):
            have_traceback = True
            continue
        if line.startswith(" "):
            continue
        if have_traceback:
            # first line not starting with a space since the traceback.
            # must be the exception!
            m = re_exception.match(line)
            if m:
                exc_type, args = m.groups()
                # get hacky - get the *real* exception object from the name.
                bits = exc_type.split(".", 1)
                if len(bits) > 1:
                    mod = __import__(bits[0])
                    exc = getattr(mod, bits[1])
                else:
                    # probably builtin
                    exc = eval(bits[0])
            else:
                # hrm - probably just an exception with no args
                try:
                    exc = eval(line.strip())
                    args = "()"
                except:
                    return None
            # try and turn the args into real args.
            try:
                args = eval(args)
            except:
                pass
            if not isinstance(args, tuple):
                args = (args,)
            # try and instantiate the exception.
            try:
                ret = exc(*args)
            except:
                ret = None
            return ret
        # apparently not - keep looking...
        have_traceback = False

class TestRunner:
    def __init__(self, argv):
        self.argv = argv
    def __call__(self):
        try:
            import subprocess
            p = subprocess.Popen(self.argv, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            output, _ = p.communicate()
            rc = p.returncode
        except ImportError:
            # py2.3?
            fin, fout, ferr = os.popen3(" ".join(self.argv))
            fin.close()
            output = fout.read() + ferr.read()
            fout.close()
            rc = ferr.close()
        if rc:
            base = os.path.basename(self.argv[1])
            # See if we can detect and reconstruct an exception in the output.
            reconstituted = find_exception_in_output(output)
            if reconstituted is not None:
                raise reconstituted
            raise AssertionError("%s failed with exit code %s.  Output is:\n%s" % (base, rc, output))

def get_demo_tests():
    import win32api
    ret = []
    demo_dir = os.path.abspath(os.path.join(os.path.dirname(win32api.__file__), "Demos"))
    assert os.path.isdir(demo_dir), demo_dir
    for name in os.listdir(demo_dir):
        base, ext = os.path.splitext(name)
        if ext != ".py" or base in ui_demos or base in bad_demos:
            continue
        argv = (sys.executable, os.path.join(demo_dir, base+".py")) + \
               argvs.get(base, ())
        ret.append(unittest.FunctionTestCase(TestRunner(argv), description="win32/demos/" + name))
    return ret

def import_all():
    # Some hacks for import order - dde depends on win32ui
    try:
        import win32ui
    except ImportError:
        pass # 'what-ev-a....'

    import win32api
    dir = os.path.dirname(win32api.__file__)
    num = 0
    is_debug = os.path.basename(win32api.__file__).endswith("_d")
    for name in os.listdir(dir):
        base, ext = os.path.splitext(name)
        if (ext==".pyd") and \
           name != "_winxptheme.pyd" and \
           (is_debug and base.endswith("_d") or \
            not is_debug and not base.endswith("_d")):
            try:
                __import__(base)
            except:
                print "FAILED to import", name
                raise
            num += 1

def suite():
    # Loop over all .py files here, except me :)
    try:
        me = __file__
    except NameError:
        me = sys.argv[0]
    me = os.path.abspath(me)
    files = os.listdir(os.path.dirname(me))
    suite = unittest.TestSuite()
    suite.addTest(unittest.FunctionTestCase(import_all))
    for file in files:
        base, ext = os.path.splitext(file)
        if ext=='.py' and os.path.basename(me) != file:
            try:
                mod = __import__(base)
            except:
                print "FAILED to import test module %r" % base
                traceback.print_exc()
                continue
            if hasattr(mod, "suite"):
                test = mod.suite()
            else:
                test = unittest.defaultTestLoader.loadTestsFromModule(mod)
            suite.addTest(test)
    for test in get_demo_tests():
        suite.addTest(test)
    return suite

class CustomLoader(pywin32_testutil.TestLoader):
    def loadTestsFromModule(self, module):
        return self.fixupTestsForLeakTests(suite())

if __name__=='__main__':
    pywin32_testutil.testmain(testLoader=CustomLoader())
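
find_exception_in_output is easiest to follow with a worked input; the traceback text below is fabricated purely to exercise the parser (Python 2, matching the file):

sample = (
    "Traceback (most recent call last):\n"
    "  File \"demo.py\", line 1, in <module>\n"
    "ValueError: ('bad value',)\n"
)
exc = find_exception_in_output(sample)
# exc is a ValueError instance rebuilt from the final traceback line.
print repr(exc)
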
airbnb/airflow
airflow/contrib/hooks/discord_webhook_hook.py
Python
apache-2.0
1,175
0.001702
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.discord.hooks.discord_webhook`."""

import warnings

# pylint: disable=unused-import
from airflow.providers.discord.hooks.discord_webhook import DiscordWebhookHook  # noqa

warnings.warn(
    "This module is deprecated. Please use `airflow.providers.discord.hooks.discord_webhook`.",
    DeprecationWarning,
    stacklevel=2,
)
awesto/django-shop
shop/filters.py
Python
bsd-3-clause
1,512
0
from django_filters import filters
from djng.forms import fields


class Filter(filters.Filter):
    field_class = fields.Field


class CharFilter(filters.CharFilter):
    field_class = fields.CharField


class BooleanFilter(filters.BooleanFilter):
    field_class = fields.NullBooleanField


class ChoiceFilter(filters.ChoiceFilter):
    field_class = fields.ChoiceField


class TypedChoiceFilter(filters.TypedChoiceFilter):
    field_class = fields.TypedChoiceField


class UUIDFilter(filters.UUIDFilter):
    field_class = fields.UUIDField


class MultipleChoiceFilter(filters.MultipleChoiceFilter):
    field_class = fields.MultipleChoiceField


class TypedMultipleChoiceFilter(filters.TypedMultipleChoiceFilter):
    field_class = fields.TypedMultipleChoiceField


class DateFilter(filters.DateFilter):
    field_class = fields.DateField


class DateTimeFilter(filters.DateTimeFilter):
    field_class = fields.DateTimeField


class TimeFilter(filters.TimeFilter):
    field_class = fields.TimeField


class DurationFilter(filters.DurationFilter):
    field_class = fields.DurationField


class ModelChoiceFilter(filters.ModelChoiceFilter):
    field_class = fields.ModelChoiceField


class ModelMultipleChoiceFilter(filters.ModelMultipleChoiceFilter):
    field_class = fields.ModelMultipleChoiceField


class NumberFilter(filters.NumberFilter):
    field_class = fields.DecimalField


class NumericRangeFilter(filters.NumericRangeFilter):
    """
    TODO: we first must redeclare the RangeField
    """
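
Each subclass above only swaps field_class so django-filter renders through djng's Angular-aware form fields; a hedged usage sketch (Product is a hypothetical model, and lookup_expr assumes a reasonably recent django-filter):

from django_filters import FilterSet
from shop.filters import CharFilter

class ProductFilter(FilterSet):
    # Rendered with djng's CharField, so the filter form plays nicely
    # with django-angular bindings.
    name = CharFilter(lookup_expr='icontains')

    class Meta:
        model = Product  # hypothetical model
        fields = ['name']
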
maljac/odoo-addons
partner_person/__openerp__.py
Python
agpl-3.0
2,095
0.000955
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2015  ADHOC SA  (http://www.adhoc.com.ar)
#    All Rights Reserved.
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Partners Persons Management',
    'version': '1.0',
    'category': 'Tools',
    'sequence': 14,
    'summary': '',
    'description': """
Partners Persons Management
===========================
Openerp consider a person those partners that have not "is_company" as true,
now, those partners can have:
----------------------------------------------------------------------------------------------------------
* First Name and Last Name
* Birthdate
* Sex
* Mother and Father
* Childs
* Age (functional field)
* Nationality
* Husband/Wife
* National Identity
* Passport
* Marital Status

It also adds a configuration menu for choosing which fields do you wanna see.
""",
    'author': 'ADHOC SA',
    'website': 'www.adhoc.com.ar',
    'images': [
    ],
    'depends': [
        'base',
    ],
    'data': [
        'res_partner_view.xml',
        'res_config_view.xml',
        'security/partner_person_security.xml',
    ],
    'demo': [
    ],
    'test': [
    ],
    'installable': True,
    'auto_install': False,
    'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
poswald/microformats
microformats/forms.py
Python
bsd-3-clause
9,632
0.005613
# -*- coding: UTF-8 -*-
"""
Example Forms for Microformats.

Copyright (c) 2009 Nicholas H.Tollervey (http://ntoll.org/contact)

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

    * Neither the name of ntoll.org nor the names of its contributors may be
      used to endorse or promote products derived from this software without
      specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""

# Django
from django import forms
from django.forms.util import ErrorList
from django.utils.translation import ugettext as _

# Microformats
from microformats.models import geo, hCard, adr, adr_type, org, email,\
        email_type, tel, tel_type, hCalendar, hReview, hListing, hFeed,\
        hEntry, hNews


class GeoForm(forms.ModelForm):
    """
    A ModelForm for the geo microformat that makes sure the degrees decimal
    fields are within the valid ranges:

    Latitude: ±90°
    Longitude: ±180°
    """
    def clean_latitude(self):
        """
        ±90
        """
        value = self.cleaned_data['latitude']
        if value < -90.0 or value > 90.0:
            raise forms.ValidationError(_(u'Latitude is not within the valid'
                                          u' range (±90)'))
        return value

    def clean_longitude(self):
        """
        ±180
        """
        value = self.cleaned_data['longitude']
        if value < -180.0 or value > 180.0:
            raise forms.ValidationError(_(u'Longitude is not within the valid'
                                          u' range (±180)'))
        return value

    class Meta:
        model = geo


class LocationAwareForm(forms.ModelForm):
    """
    Used in concert with models derived from the LocationAwareMicroformat
    model. This form makes sure that the geo information is valid.
    """
    def clean(self):
        """
        Checks if you have one of Long or Lat you must have the other
        """
        super(LocationAwareForm, self).clean()
        cleaned_data = self.cleaned_data
        # Make sure we have a longitude and latitude
        lat = cleaned_data.get("latitude", False)
        long = cleaned_data.get("longitude", False)
        if long and not lat:
            self._errors['longitude'] = ErrorList([_("You must supply both a"\
                    " longitude and latitude")])
            del cleaned_data['longitude']
        if lat and not long:
            self._errors['latitude'] = ErrorList([_("You must supply both a"\
                    " longitude and latitude")])
            del cleaned_data['latitude']
        return cleaned_data

    def clean_latitude(self):
        """
        ±90
        """
        value = self.cleaned_data.get('latitude', False)
        if value:
            if value < -90.0 or value > 90.0:
                raise forms.ValidationError(_(u'Latitude is not within the valid'
                                              u' range (±90)'))
        return value

    def clean_longitude(self):
        """
        ±180
        """
        value = self.cleaned_data.get('longitude', False)
        if value:
            if value < -180.0 or value > 180.0:
                raise forms.ValidationError(_(u'Longitude is not within the valid'
                                              u' range (±180)'))
        return value


class hCardForm(LocationAwareForm):
    """
    A simple form to use for gathering basic information for an hCard. Use in
    conjunction with the AdrForm, OrgForm, EmailForm and TelForm to build
    something more complex.

    Inspired by:

    http://microformats.org/code/hcard/creator
    """
    def clean(self):
        """
        Checks you have something useful to use as fn
        """
        super(hCardForm, self).clean()
        cleaned_data = self.cleaned_data

        # Some minimum fields needed to create a fn
        org = cleaned_data.get('org', False)
        given_name = cleaned_data.get('given_name', False)
        family_name = cleaned_data.get('family_name', False)
        nickname = cleaned_data.get('nickname', False)

        # What the following if statement means:
        # if the user hasn't supplied either and organization name or provided
        # at least a nickname or a given name then raise an error
        if not (org or nickname or given_name):
            raise forms.ValidationError(_("You must supply a name. "\
                    " (given name, family name, nickname"\
                    " or an organization name)"))
        return cleaned_data

    class Meta:
        model = hCard


class hCalForm(LocationAwareForm):
    """
    A simple form for gathering information for an hCalendar event.

    Inspired by the form found here:

    http://microformats.org/code/hcalendar/creator
    """
    class Meta:
        model = hCalendar
        exclude = [
            'attendees',
            'contacts',
            'organizers',
        ]


class hReviewForm(LocationAwareForm):
    """
    A simple form for gathering information for an hReview microformat.

    Inspired by the form found here:

    http://microformats.org/code/hreview/creator
    """
    class Meta:
        model = hReview


class hListingForm(LocationAwareForm):
    """
    A simple form for gathering information for an hListing microformat.
    """
    class Meta:
        model = hListing


class hFeedForm(forms.ModelForm):
    """
    A simple form for gathering information for the hFeed part of the hAtom
    microformat.
    """
    class Meta:
        model = hFeed


class hEntryForm(forms.ModelForm):
    """
    A simple form for gathering information for the hEntry part of the hAtom
    microformat.
    """
    class Meta:
        model = hEntry


class hNewsForm(LocationAwareForm):
    """
    A simple form for gathering information for the hNews part of the hEntry
    microformat.
    """
    class Meta:
        model = hNews


class AdrForm(forms.ModelForm):
    """
    A simple form to use for gathering basic information for an adr
    microformat. Use in conjunction with the hCardForm, OrgForm, EmailForm and
    TelForm to build something more complex.

    Inspired by:

    http://microformats.org/code/hcard/creator
    """
    def __init__(self, *args, **kwargs):
        super(AdrForm, self).__init__(*args, **kwargs)
        if 'types' in self.fields:
            self.fields['types'].widget = forms.CheckboxSelectMultiple()
            self.fields['types'].label = _('Address Type')
            self.fields['types'].help_text = _('Please select as many that apply')
            self.fields['types'].queryset = adr_type.objects.all()

    class Meta:
        model = adr
        exclude = ['hcard', 'post_office_box']


class OrgForm(forms.ModelForm):
    """
    A simple form to use for gathering basic information for an organisation
    associated with an hCard. Use in conjunction with the AdrForm, EmailForm
    and TelForm to build something more complex.

    Inspired by:

    http://microformats.org/code/hcard/creator
    """
    class Meta:
        model = org
        exclude = ['hcard']


class EmailForm(forms.ModelForm):
    """
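
A small sketch of the GeoForm range check above in action, assuming latitude and longitude are the only fields the geo model requires (Python 2, matching the file):

form = GeoForm(data={'latitude': '91.0', 'longitude': '10.0'})
if not form.is_valid():
    # 91.0 is outside the +/-90 window, so clean_latitude raises.
    print form.errors['latitude']
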
jherrlin/unipass
tests/test_controller.py
Python
mit
1,722
0.002323
import unittest
import os

from unipass.controller import controller
from unipass.model.models import initdb

BASE_DIR = os.path.dirname(os.path.abspath(__file__))


class ControllerTest(unittest.TestCase):

    def setUp(self):
        initdb()

    def tearDown(self):
        try:
            os.remove('sqlite3.db')
            os.remove('unipass_export.json')
        except OSError:
            pass

    def test_createAdmin_True(self):
        self.assertTrue(controller.create_user('john', 'password'))

    def test_addService_True(self):
        self.assertTrue(controller.add_service(
            'facebook', 'jherrin@gmail.com', 'password', 'facebook acc'))

    def test_login_True(self):
        self.assertTrue(controller.create_user('admin', 'password'))
        self.assertTrue(controller.login('admin', 'password'))

    def test_deleteEntry_True(self):
        self.assertTrue(controller.add_service(
            'facebook', 'jherrin@gmail.com', 'password', 'facebook acc'))
        serv = controller.find_by_service('facebook')
        self.assertTrue(controller.delete_service(serv.uuid))

    def test_exportData_True(self):
        self.assertTrue(controller.create_user('john', 'password'))
        self.assertTrue(controller.export_data())

    def test_exportData_False(self):
        self.assertTrue(controller.export_data())

    def test_importData_False(self):
        self.assertFalse(controller.import_data(path=BASE_DIR+'/broken.json'))

    def test_importData_True(self):
        self.assertTrue(controller.import_data(path=BASE_DIR+'/correct.json'))

    def test_generatePassword_True(self):
        self.assertTrue(len(controller.generate_password(
            True, True, True, True, 10)) == 10)
M4rtinK/pyside-android
tests/QtGui/bug_972.py
Python
lgpl-2.1
1,124
0.003559
import unittest

from PySide.QtCore import QSizeF
from PySide.QtGui import QGraphicsProxyWidget, QSizePolicy, QPushButton, QGraphicsScene, QGraphicsView

from helper import TimedQApplication

def createItem(minimum, preferred, maximum, name):
    w = QGraphicsProxyWidget()
    w.setWidget(QPushButton(name))
    w.setMinimumSize(minimum)
    w.setPreferredSize(preferred)
    w.setMaximumSize(maximum)
    w.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
    return w

class TestBug972 (TimedQApplication):
    # Test if the function QGraphicsProxyWidget.setWidget have the correct behavior

    def testIt(self):
        scene = QGraphicsScene()
        minSize = QSizeF(30, 100)
        prefSize = QSizeF(210, 100)
        maxSize = QSizeF(300, 100)

        a = createItem(minSize, prefSize, maxSize, "A")
        b = createItem(minSize, prefSize, maxSize, "B")
        c = createItem(minSize, prefSize, maxSize, "C")
        d = createItem(minSize, prefSize, maxSize, "D")

        view = QGraphicsView(scene)
        view.show()
        self.app.exec_()

if __name__ == "__main__":
    unittest.main()
fedspendingtransparency/data-act-broker-backend
dataactcore/scripts/delete_deleted_fpds_idv.py
Python
cc0-1.0
6,000
0.003
import logging
import datetime
import pandas as pd
import numpy as np
import os
import boto3

from dataactcore.config import CONFIG_BROKER
from dataactcore.interfaces.db import GlobalDB
from dataactcore.logging import configure_logging
from dataactcore.models.stagingModels import DetachedAwardProcurement
from dataactcore.models.jobModels import Submission  # noqa
from dataactcore.models.userModel import User  # noqa
from dataactvalidator.health_check import create_app
from dataactvalidator.scripts.loader_utils import trim_item
from dataactvalidator.filestreaming.csvReader import CsvReader
from dataactvalidator.filestreaming.csvLocalWriter import CsvLocalWriter

logger = logging.getLogger(__name__)


def get_delete_file():
    """ Read the file into a pandas object """
    file_name = 'IDV_Deletes.csv'
    if CONFIG_BROKER["use_aws"]:
        reader = CsvReader()
        pa_file = open(reader.get_filename(CONFIG_BROKER['aws_region'], CONFIG_BROKER['sf_133_bucket'], file_name),
                       encoding='utf-8')
    else:
        base_path = os.path.join(CONFIG_BROKER["path"], "dataactvalidator", "config")
        pa_file = os.path.join(base_path, file_name)

    return pa_file


def convert_date(date_string):
    """ Converts the date to the same format as our last_modified column """
    delete_date = datetime.datetime.strptime(date_string, '%m/%d/%y %I:%M %p')
    date_string = delete_date.strftime('%Y-%m-%d %H:%M:%S')
    return date_string


def convert_unique_key(unique_key):
    """ Converts the unique key given by the file into the format we use for our unique key """
    unique_key_array = unique_key.split(':')
    unique_key = unique_key_array[2] + '_-none-_' + unique_key_array[0] + '_' + unique_key_array[1] + \
        '_-none-_-none-'
    return unique_key


def clean_delete_data(data):
    """ Clean up the data so it's easier to process """
    # Shouldn't be any extra rows, but just in case, drop all with no contents
    data.dropna(inplace=True, how='all')

    # replace NaN
    data = data.replace(np.nan, '', regex=True)

    # trim all columns
    data = data.applymap(lambda x: trim_item(x) if len(str(x).strip()) else None)

    # Convert all dates to the same format as we have in the DB
    data['delete_date'] = data['delete_date'].map(lambda x: convert_date(x) if x else None)

    # Convert all unique keys to the same format as we have in the DB
    data['primary_key'] = data['primary_key'].map(lambda x: convert_unique_key(x) if x else None)

    return data


def get_deletes(sess, data):
    """ Gets all the values that actually need to be deleted from our database """
    model = DetachedAwardProcurement
    delete_dict = {}
    delete_list = []
    row_count = len(data.index)
    for index, row in data.iterrows():
        unique_string = row['primary_key']
        last_modified = row['delete_date']

        # Keeping track so we know it isn't spinning its wheels forever
        if index % 500 == 0:
            logger.info("Checking delete record {} of {}.".format(index, row_count))

        existing_item = sess.query(model.last_modified, model.detached_award_procurement_id,
                                   model.detached_award_proc_unique). \
            filter_by(detached_award_proc_unique=unique_string).one_or_none()

        if existing_item and last_modified > existing_item.last_modified:
            delete_list.append(existing_item.detached_award_procurement_id)
            delete_dict[existing_item.detached_award_procurement_id] = existing_item.detached_award_proc_unique

    return delete_list, delete_dict


def delete_records(sess, delete_list, delete_dict):
    """ Delete the records listed and create a file for website deletion. """
    # only need to delete values if there's something to delete
    if delete_list:
        sess.query(DetachedAwardProcurement). \
            filter(DetachedAwardProcurement.detached_award_procurement_id.in_(delete_list)). \
            delete(synchronize_session=False)

    # writing the file
    seconds = int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds())
    now = datetime.datetime.now()
    file_name = now.strftime('%m-%d-%Y') + "_delete_records_IDV_" + str(seconds) + ".csv"
    headers = ["detached_award_procurement_id", "detached_award_proc_unique"]
    if CONFIG_BROKER["use_aws"]:
        s3client = boto3.client('s3', region_name=CONFIG_BROKER['aws_region'])
        # add headers
        contents = bytes((",".join(headers) + "\n").encode())
        for key, value in delete_dict.items():
            contents += bytes('{},{}\n'.format(key, value).encode())
        s3client.put_object(Bucket=CONFIG_BROKER['fpds_delete_bucket'], Key=file_name, Body=contents)
    else:
        with CsvLocalWriter(file_name, headers) as writer:
            for key, value in delete_dict.items():
                writer.write([key, value])
            writer.finish_batch()


def main():
    sess = GlobalDB.db().session
    start = datetime.datetime.now()
    logger.info("FPDS IDV delete started")

    # get and read the file
    del_file = get_delete_file()
    data = pd.read_csv(del_file, dtype=str, encoding='utf_8_sig')

    # Clean up the data so it's usable
    data = clean_delete_data(data)

    # Gather list of records to delete
    gather_start = datetime.datetime.now()
    logger.info("Starting gathering of records to delete.")
    delete_list, delete_dict = get_deletes(sess, data)
    gather_end = datetime.datetime.now()
    logger.info("Finished gathering records in {} seconds. Total records to delete: {}".
                format(gather_end - gather_start, len(delete_list)))

    # Delete records
    logger.info("Deleting records")
    delete_records(sess, delete_list, delete_dict)
    sess.commit()

    end = datetime.datetime.now()
    logger.info("FPDS IDV delete finished in %s seconds", end - start)


if __name__ == '__main__':
    configure_logging()
    with create_app().app_context():
        main()
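
convert_unique_key's reshuffle is clearest with a worked value; the colon-separated input below is made up but follows the layout the function expects:

# 'AGENCY:PIID:REF' -> 'REF_-none-_AGENCY_PIID_-none-_-none-'
print(convert_unique_key('1234:AWD0001:REF9'))
# -> 'REF9_-none-_1234_AWD0001_-none-_-none-'
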
aroquemaurel/Cuis-In
cuisin/restaurant/migrations/0001_initial.py
Python
gpl-2.0
1,387
0.001442
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('tags', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Restaurant',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=128)),
                ('slug', models.SlugField(default=b'', max_length=128)),
                ('note', models.IntegerField(max_length=2)),
                ('date', models.DateTimeField(auto_now_add=True, verbose_name=b"Date d'ajout")),
                ('reservation', models.BooleanField(default=False)),
                ('description', models.TextField()),
                ('phone', models.CharField(default=b'', max_length=16)),
                ('website', models.CharField(default=b'', max_length=128)),
                ('address', models.CharField(default=b'', max_length=128)),
                ('postalcode', models.CharField(default=b'', max_length=16)),
                ('city', models.CharField(default=b'', max_length=128)),
                ('tags', models.ManyToManyField(to='tags.Tag')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
szecsi/Gears
GearsPy/Project/Components/Stimulus/FullfieldGradient.py
Python
gpl-2.0
898
0.040089
import Gears as gears
from .. import *
from .SingleShape import *

class FullfieldGradient(SingleShape):

    def boot(self,
             *,
             duration    : 'Stimulus time in frames (unless superseded by duration_s).'
                         = 1,
             duration_s  : 'Stimulus time in seconds (takes precedence over duration given in frames).'
                         = 0,
             name        : 'Stimulus name to display in sequence overview plot.'
                         = 'gradient',
             toneMapping : 'Tone mapping component (Tone.*)'
                         = Tone.UiConfigured(),
             **bargs     : Pif.Gradient
             ):
        super().boot(name=name, duration=duration, duration_s=duration_s,
                     pattern=Pif.Gradient(**bargs),
                     toneMapping=toneMapping,
                     )
qq40660/rss_spider
script/stop_spider.py
Python
gpl-2.0
1,370
0.00219
#!/usr/bin/env python
# _*_ coding: utf-8 _*_

import subprocess

try:
    import simplejson as json
except ImportError:
    import json


# -> list  ["422e608f9f28cef127b3d5ef93fe9399", ""]
def list_job_ids(host="http://localhost", port=6800, project="default"):
    command = "curl %s:%d/listjobs.json?project=%s" % (host, port, project)
    command_result = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.readline()
    running_ids = json.loads(command_result).get("running")
    ids = []
    for i in running_ids:
        ids.append(i["id"])
    return ids


# str -> list  "43242342342354efklajdf14" -> [4234, grep_pid]
def id_to_pid(spider_id):
    command = "ps aux | grep %s | grep -v grep | awk '{print $2}'" % spider_id
    info = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.readlines()
    return info


# list -> list  ["asdfasdf234234", "a2345asdfaa"] -> [4324, 3453]
def ids_to_pids(spider_ids):
    pids = []
    for i in spider_ids:
        pid = id_to_pid(i)
        pids.extend(pid)
    return pids


# kill 4323
def kill_spider(pid):
    command = "kill -9 %s" % pid
    subprocess.Popen(command, shell=True)


# kill [4324, 4234]
def kill_spider_list(pid_list):
    for i in pid_list:
        kill_spider(i)


if __name__ == "__main__":
    ids = list_job_ids()
    pids = ids_to_pids(ids)
    kill_spider_list(pids)
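
list_job_ids assumes scrapyd's listjobs.json response shape; an illustrative (made-up) payload and the parse the function performs on it:

import json

sample = '{"status": "ok", "running": [{"id": "422e608f9f28cef127b3d5ef93fe9399"}]}'
running = json.loads(sample).get("running")
print([job["id"] for job in running])  # ['422e608f9f28cef127b3d5ef93fe9399']
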
BRCDcomm/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_ip_access_list.py
Python
apache-2.0
93,629
0.003407
#!/usr/bin/env python
import xml.etree.ElementTree as ET


class brocade_ip_access_list(object):
    """Auto generated class.
    """
    def __init__(self, **kwargs):
        self._callback = kwargs.pop('callback')

    def ip_acl_ip_access_list_standard_name(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        ip_acl = ET.SubElement(config, "ip-acl", xmlns="urn:brocade.com:mgmt:brocade-ip-access-list")
        ip = ET.SubElement(ip_acl, "ip")
        access_list = ET.SubElement(ip, "access-list")
        standard = ET.SubElement(access_list, "standard")
        name = ET.SubElement(standard, "name")
        name.text = kwargs.pop('name')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def ip_acl_ip_access_list_standard_hide_ip_acl_std_seq_seq_id(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        ip_acl = ET.SubElement(config, "ip-acl", xmlns="urn:brocade.com:mgmt:brocade-ip-access-list")
        ip = ET.SubElement(ip_acl, "ip")
        access_list = ET.SubElement(ip, "access-list")
        standard = ET.SubElement(access_list, "standard")
        name_key = ET.SubElement(standard, "name")
        name_key.text = kwargs.pop('name')
        hide_ip_acl_std = ET.SubElement(standard, "hide-ip-acl-std")
        seq = ET.SubElement(hide_ip_acl_std, "seq")
        seq_id = ET.SubElement(seq, "seq-id")
        seq_id.text = kwargs.pop('seq_id')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def ip_acl_ip_access_list_standard_hide_ip_acl_std_seq_action(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        ip_acl = ET.SubElement(config, "ip-acl", xmlns="urn:brocade.com:mgmt:brocade-ip-access-list")
        ip = ET.SubElement(ip_acl, "ip")
        access_list = ET.SubElement(ip, "access-list")
        standard = ET.SubElement(access_list, "standard")
        name_key = ET.SubElement(standard, "name")
        name_key.text = kwargs.pop('name')
        hide_ip_acl_std = ET.SubElement(standard, "hide-ip-acl-std")
        seq = ET.SubElement(hide_ip_acl_std, "seq")
        seq_id_key = ET.SubElement(seq, "seq-id")
        seq_id_key.text = kwargs.pop('seq_id')
        action = ET.SubElement(seq, "action")
        action.text = kwargs.pop('action')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def ip_acl_ip_access_list_standard_hide_ip_acl_std_seq_src_host_any_sip(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        ip_acl = ET.SubElement(config, "ip-acl", xmlns="urn:brocade.com:mgmt:brocade-ip-access-list")
        ip = ET.SubElement(ip_acl, "ip")
        access_list = ET.SubElement(ip, "access-list")
        standard = ET.SubElement(access_list, "standard")
        name_key = ET.SubElement(standard, "name")
        name_key.text = kwargs.pop('name')
        hide_ip_acl_std = ET.SubElement(standard, "hide-ip-acl-std")
        seq = ET.SubElement(hide_ip_acl_std, "seq")
        seq_id_key = ET.SubElement(seq, "seq-id")
        seq_id_key.text = kwargs.pop('seq_id')
        src_host_any_sip = ET.SubElement(seq, "src-host-any-sip")
        src_host_any_sip.text = kwargs.pop('src_host_any_sip')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def ip_acl_ip_access_list_standard_hide_ip_acl_std_seq_src_host_ip(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        ip_acl = ET.SubElement(config, "ip-acl", xmlns="urn:brocade.com:mgmt:brocade-ip-access-list")
        ip = ET.SubElement(ip_acl, "ip")
        access_list = ET.SubElement(ip, "access-list")
        standard = ET.SubElement(access_list, "standard")
        name_key = ET.SubElement(standard, "name")
        name_key.text = kwargs.pop('name')
        hide_ip_acl_std = ET.SubElement(standard, "hide-ip-acl-std")
        seq = ET.SubElement(hide_ip_acl_std, "seq")
        seq_id_key = ET.SubElement(seq, "seq-id")
        seq_id_key.text = kwargs.pop('seq_id')
        src_host_ip = ET.SubElement(seq, "src-host-ip")
        src_host_ip.text = kwargs.pop('src_host_ip')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def ip_acl_ip_access_list_standard_hide_ip_acl_std_seq_src_mask(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        ip_acl = ET.SubElement(config, "ip-acl", xmlns="urn:brocade.com:mgmt:brocade-ip-access-list")
        ip = ET.SubElement(ip_acl, "ip")
        access_list = ET.SubElement(ip, "access-list")
        standard = ET.SubElement(access_list, "standard")
        name_key = ET.SubElement(standard, "name")
        name_key.text = kwargs.pop('name')
        hide_ip_acl_std = ET.SubElement(standard, "hide-ip-acl-std")
        seq = ET.SubElement(hide_ip_acl_std, "seq")
        seq_id_key = ET.SubElement(seq, "seq-id")
        seq_id_key.text = kwargs.pop('seq_id')
        src_mask = ET.SubElement(seq, "src-mask")
        src_mask.text = kwargs.pop('src_mask')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def ip_acl_ip_access_list_standard_hide_ip_acl_std_seq_count(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        ip_acl = ET.SubElement(config, "ip-acl", xmlns="urn:brocade.com:mgmt:brocade-ip-access-list")
        ip = ET.SubElement(ip_acl, "ip")
        access_list = ET.SubElement(ip, "access-list")
        standard = ET.SubElement(access_list, "standard")
        name_key = ET.SubElement(standard, "name")
        name_key.text = kwargs.pop('name')
        hide_ip_acl_std = ET.SubElement(standard, "hide-ip-acl-std")
        seq = ET.SubElement(hide_ip_acl_std, "seq")
        seq_id_key = ET.SubElement(seq, "seq-id")
        seq_id_key.text = kwargs.pop('seq_id')
        count = ET.SubElement(seq, "count")

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def ip_acl_ip_access_list_standard_hide_ip_acl_std_seq_log(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        ip_acl = ET.SubElement(config, "ip-acl", xmlns="urn:brocade.com:mgmt:brocade-ip-access-list")
        ip = ET.SubElement(ip_acl, "ip")
        access_list = ET.SubElement(ip, "access-list")
        standard = ET.SubElement(access_list, "standard")
        name_key = ET.SubElement(standard, "name")
        name_key.text = kwargs.pop('name')
        hide_ip_acl_std = ET.SubElement(standard, "hide-ip-acl-std")
        seq = ET.SubElement(hide_ip_acl_std, "seq")
        seq_id_key = ET.SubElement(seq, "seq-id")
        seq_id_key.text = kwargs.pop('seq_id')
        log = ET.SubElement(seq, "log")

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def ip_acl_ip_access_list_extended_name(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        ip_acl = ET.SubElement(config, "ip-acl", xmlns="urn:brocade.com:mgmt:brocade-ip-access-list")
        ip = ET.SubElement(ip_acl, "ip")
        access_list = ET.SubElement(ip, "access-list")
        extended = ET.SubElement(access_list, "extended")
        name = ET.SubElement(extended, "name")
        name.text = kwargs.pop('name')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)

    def ip_acl_ip_access_list_extended_hide_ip_acl_ext_seq_seq_id(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        ip_acl = ET.SubElement(config, "ip-acl", xmlns="urn:brocade.com:mgmt:brocade-ip-access-list")
        ip = ET.SubElement(ip_acl, "ip")
        access_list = ET.SubElement(ip, "access-list")
        extended = ET.SubElement(access_list, "extended")
        name_key = ET.Sub
thombashi/typepy
test/converter/_common.py
Python
mit
276
0
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""

import typepy

EXCEPTION_RESULT = "E"


def convert_wrapper(typeobj, method):
    try:
        return getattr(typeobj, method)()
    except typepy.TypeConversionError:
        return EXCEPTION_RESULT
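A usage sketch for convert_wrapper (not part of the test helper; it assumes typepy's Integer type class, whose convert() raises TypeConversionError when the value cannot be converted):

from typepy import Integer

print(convert_wrapper(Integer("42"), "convert"))   # 42
print(convert_wrapper(Integer("abc"), "convert"))  # "E" (conversion failed)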
shreyankg/Dorrie
mckup/build/translations.py
Python
agpl-3.0
628
0.003185
#!/usr/bin/python

import sys

file = sys.argv[1]
f = open(file)

print '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
 "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"
      xmlns:py="http://genshi.edgewall.org/"
      xmlns:xi="http://www.w3.org/2001/XInclude"
      py:strip="">
'''
try:
    for lang in f:
        lang = lang.strip()
        if lang and not lang.startswith('#'):
            print '    <option value="' + lang + '" py:attrs="{\'selected\': lang == \'' + lang + '\' and \'selected\' or None}">' + lang + '</option>'
finally:
    f.close()

print '''</html>
'''
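To make the transform concrete, a hypothetical input and the element it yields (not from the repo):

# languages.txt:
#   en_US
#   fr_FR
#
# each non-comment line becomes a Genshi option element, e.g. for en_US:
#   <option value="en_US" py:attrs="{'selected': lang == 'en_US' and 'selected' or None}">en_US</option>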
tejal29/pants
tests/python/pants_test/pants_run_integration_test.py
Python
apache-2.0
6,636
0.00859
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os import subprocess import unittest from collections import namedtuple from operator import eq, ne from pants.base.build_environment import get_buildroot from pants.fs.archive import ZIP from pants.util.contextutil import temporary_dir from pants.util.dirutil import safe_mkdir, safe_open PantsResult = namedtuple('PantsResult', ['command', 'returncode', 'stdout_data', 'stderr_data']) class PantsRunIntegrationTest(unittest.TestCase): """A base class useful for integration tests for targets in the same repo.""" PANTS_SUCCESS_CODE = 0 PANTS_SCRIPT_NAME = 'pants' @classmethod def has_python_version(cls, version): """Returns true if the current system has the specified version of python. :param version: A python version string, such as 2.6, 3. """ try: subprocess.call(['python%s' % version, '-V']) return True except OSError: return False def workdir_root(self): # We can hard-code '.pants.d' here because we know that will always be its value # in the pantsbuild/pants repo (e.g., that's what we .gitignore in that repo). # Grabbing the pants_workdir config would require this pants's config object, # which we don't have a reference to here. root = os.path.join(get_buildroot(), '.pants.d', 'tmp') safe_mkdir(root) return root def run_pants_with_workdir(self, command, workdir, config=None, stdin_data=None, extra_env=None, **kwargs): config = config.copy() if config else {} # We add workdir to the DEFAULT section, and also ensure that it's emitted first. default_section = config.pop('DEFAULT', {}) default_section['pants_workdir'] = '%s' % workdir ini = '' for section, section_config in [('DEFAULT', default_section)] + config.items(): ini += '\n[%s]\n' % section for key, val in section_config.items(): ini += '%s: %s\n' % (key, val) ini_file_name = os.path.join(workdir, 'pants.ini') with safe_open(ini_file_name, mode='w') as fp: fp.write(ini) env = os.environ.copy() env.update(extra_env or {}) pants_script = os.path.join(get_buildroot(), self.PANTS_SCRIPT_NAME) pants_command = [pants_script, '--no-lock', '--kill-nailguns', '--no-pantsrc', '--config-override={0}'.format(ini_file_name), '--print-exception-stacktrace'] + command proc = subprocess.Popen(pants_command, env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) (stdout_data, stderr_data) = proc.communicate(stdin_data) return PantsResult(pants_command, proc.returncode, stdout_data.decode("utf-8"), stderr_data.decode("utf-8")) def run_pants(self, command, config=None, stdin_data=None, extra_env=None, **kwargs): """Runs pants in a subprocess. :param list command: A list of command line arguments coming after `./pants`. :param config: Optional data for a generated ini file. A map of <section-name> -> map of key -> value. If order in the ini file matters, this should be an OrderedDict. :param kwargs: Extra keyword args to pass to `subprocess.Popen`. :returns a tuple (returncode, stdout_data, stderr_data). IMPORTANT NOTE: The subprocess will be run with --no-lock, so that it doesn't deadlock waiting for this process to release the workspace lock. It's the caller's responsibility to ensure that the invoked pants doesn't interact badly with this one. 
""" with temporary_dir(root_dir=self.workdir_root()) as workdir: return self.run_pants_with_workdir(command, workdir, config, stdin_data, extra_env, **kwargs) def bundle_and_run(self, target, bundle_name, args=None): """Creates the bundle with pants, then does java -jar {bundle_name}.jar to execute the bundle. :param target: target name to compile :param bundle_name: resulting bundle filename (minus .jar extension) :param args: optional arguments to pass to executable :return: stdout as a string on success, raises an Exception on error """ pants_run = self.run_pants(['bundle', '--archive=zip', target]) self.assert_success(pants_run) # TODO(John Sirois): We need a zip here to suck in external library classpath elements # pointed to by symlinks in the run_pants ephemeral tmpdir. Switch run_pants to be a # contextmanager that yields its results while the tmpdir workdir is still active and change # this test back to using an un-archived bundle. with temporary_dir() as workdir: ZIP.extract('dist/{bundle_name}.zip'.format(bundle_name=bundle_name), workdir) optional_args = [] if args: optional_args = args java_run = subprocess.Popen(['java',
'-jar', '{bundle_name}.jar'.format(bundle_name=bundle_name)] + optional_args,
stdout=subprocess.PIPE, cwd=workdir) stdout, _ = java_run.communicate() java_returncode = java_run.returncode self.assertEquals(java_returncode, 0) return stdout def assert_success(self, pants_run, msg=None): self.assert_result(pants_run, self.PANTS_SUCCESS_CODE, expected=True, msg=msg) def assert_failure(self, pants_run, msg=None): self.assert_result(pants_run, self.PANTS_SUCCESS_CODE, expected=False, msg=msg) def assert_result(self, pants_run, value, expected=True, msg=None): check, assertion = (eq, self.assertEqual) if expected else (ne, self.assertNotEqual) if check(pants_run.returncode, value): return details = [msg] if msg else [] details.append(' '.join(pants_run.command)) details.append('returncode: {returncode}'.format(returncode=pants_run.returncode)) def indent(content): return '\n\t'.join(content.splitlines()) if pants_run.stdout_data: details.append('stdout:\n\t{stdout}'.format(stdout=indent(pants_run.stdout_data))) if pants_run.stderr_data: details.append('stderr:\n\t{stderr}'.format(stderr=indent(pants_run.stderr_data))) error_msg = '\n'.join(details) assertion(value, pants_run.returncode, error_msg)
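A usage sketch (hypothetical test, not from the repo) showing the intended pattern: subclass the base class, invoke pants, and assert on the result.

class ExampleIntegrationTest(PantsRunIntegrationTest):
  def test_list_targets(self):
    # 'examples::' is a placeholder target spec
    pants_run = self.run_pants(['list', 'examples::'])
    self.assert_success(pants_run)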
USGM/suds
tests/saxenc.py
Python
lgpl-3.0
1,896
0.003692
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )

#
# sax encoding/decoding test.
#

from suds.sax.element import Element
from suds.sax.parser import Parser


def basic():
    xml = "<a>Me &amp;&amp; &lt;b&gt;my&lt;/b&gt; shadow&apos;s &lt;i&gt;dog&lt;/i&gt; love to &apos;play&apos; and sing &quot;la,la,la&quot;;</a>"
    p = Parser()
    d = p.parse(string=xml)
    a = d.root()
    print('A(parsed)=\n%s' % a)
    assert str(a) == xml
    b = Element('a')
    b.setText('Me &&amp; &lt;b>my</b> shadow\'s <i>dog</i> love to \'play\' and sing "la,la,la";')
    print('B(encoded)=\n%s' % b)
    assert str(b) == xml
    print('A(text-decoded)=\n%s' % a.getText())
    print('B(text-decoded)=\n%s' % b.getText())
    assert a.getText() == b.getText()
    print('test pruning')
    j = Element('A')
    j.set('n', 1)
    j.append(Element('B'))
    print(j)
    j.prune()
    print(j)


def cdata():
    xml = '<a><![CDATA[<b>This is my &amp;&lt;tag&gt;</b>]]></a>'
    p = Parser()
    d = p.parse(string=xml)
    print(d)
    a = d.root()
    print(a.getText())


if __name__ == '__main__':
    #basic()
    cdata()
brycedrennan/internetarchive
internetarchive/cli/ia_configure.py
Python
agpl-3.0
2,377
0.000421
# -*- coding: utf-8 -*-
#
# The internetarchive module is a Python/CLI interface to Archive.org.
#
# Copyright (C) 2012-2016 Internet Archive
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""Configure 'ia' with your Archive.org credentials.

usage:
    ia configure [--help]
    ia configure [--username=<username> --password=<password>]

options:
    -h, --help
    -u, --username=<username>  Provide username as an option rather than
                               providing it interactively.
    -p, --password=<password>  Provide password as an option rather than
                               providing it interactively.
"""
from __future__ import absolute_import, print_function, unicode_literals

import sys

from docopt import docopt

from internetarchive import configure
from internetarchive.exceptions import AuthenticationError


def main(argv, session):
    args = docopt(__doc__, argv=argv)
    try:
        if args['--username'] and args['--password']:
            config_file_path = configure(args['--username'],
                                         args['--password'],
                                         session.config_file)
            print('Config saved to: {0}'.format(config_file_path))
        else:
            print("Enter your Archive.org credentials below to configure 'ia'.\n")
            config_file_path = configure(config_file=session.config_file)
            print('\nConfig saved to: {0}'.format(config_file_path))
    except AuthenticationError as exc:
        # TODO: refactor output so we don't have to have special cases
        # for adding newlines!
        if args['--username']:
            print('error: {0}'.format(str(exc)))
        else:
            print('\nerror: {0}'.format(str(exc)))
        sys.exit(1)
dropbox/changes
tests/changes/jobs/test_update_project_stats.py
Python
apache-2.0
1,357
0
from __future__ import absolute_import

from changes.constants import Status, Result
from changes.config import db
from changes.jobs.update_project_stats import (
    update_project_stats, update_project_plan_stats
)
from changes.models.project import Project
from changes.testutils import TestCase


class UpdateProjectStatsTest(TestCase):
    def test_simple(self):
        project = self.create_project()
        self.create_build(
            project=project,
            status=Status.finished,
            result=Result.passed,
            duration=5050,
        )

        update_project_stats(project_id=project.id.hex)

        db.session.expire(project)

        project = Project.query.get(project.id)
        assert project.avg_build_time == 5050


class UpdateProjectPlanStatsTest(TestCase):
    def test_simple(self):
        project = self.create_project()
        build = self.create_build(
            project=project,
            status=Status.finished,
            result=Result.passed,
            duration=5050,
        )
        job = self.create_job(build)
        plan = self.create_plan(project)
        self.create_job_plan(job, plan)

        update_project_plan_stats(
            project_id=project.id.hex,
            plan_id=plan.id.hex,
        )

        db.session.expire(plan)

        assert plan.avg_build_time == 5050
qizxdb/qizx-python
qizx/__init__.py
Python
mit
534
0
""" Qizx Python API bindings :copyright: (c) 2015 by Michael Paddon :license: MIT, see LICENSE for more details. """ from .qizx import ( Client, QizxError, QizxBadRequestError, QizxServerError, QizxNotFoundError
, QizxAccessControlError, QizxXMLDataError, QizxCompilationError, QizxEvaluationError, QizxTimeoutError, QizxImportError, UnexpectedResponseError, TransactionError ) __title__ = 'qizx' __version__ = '1.0.2' __author__ = "Michael Paddon" __license__ = '
MIT' __copyright__ = "Copyright 2015 Michael Paddon"
glarue-ol/sensorlab-observer
observer/m_sensorlab/frame_format.py
Python
mpl-2.0
9,968
0.004916
# -*- coding: utf-8 -*- # Generated by h2py from sensorlab-frame-format.h FIRST_BYTE = 0 LAST_BYTE = -1 NODE_ID_FIELD_LENGTH = 4 ENTITY_ID_FIELD_LENGTH = 1 EVENT_ID_FIELD_LENGTH = 1 PROPERTIES_COUNT_FIELD_LENGTH = 1 NAME_LENGTH_FIELD_LENGTH = 1 LINK_ID_FIELD_LENGTH = 1 FRAME_ID_FIELD_LENGTH = 1 FRAME_DATA_LENGTH_FIELD_LENGTH = 2 PROPERTY_ID_FIELD_LENGTH = 1 PROPERTY_UNIT_PREFIX_FIELD_LENGTH = 1 PROPERTY_UNIT_FIELD_LENGTH = 1 PROPERTY_TYPE_FIELD_LENGTH = 1 PROPERTY_VALUE_LENGTH_FIELD_LENGTH = 2 SENSORLAB_HEADER_LENGTH = (NODE_ID_FIELD_LENGTH + EVENT_ID_FIELD_LENGTH) NODE_HEADER_LENGTH = PROPERTIES_COUNT_FIELD_LENGTH ENTITY_HEADER_LENGTH = (ENTITY_ID_FIELD_LENGTH + PROPERTIES_COUNT_FIELD_LENGTH) PROPERTY_DECLARATION_HEADER_LENGTH = (PROPERTY_ID_FIELD_LENGTH + PROPERTY_UNIT_PREFIX_FIELD_LENGTH + PROPERTY_UNIT_FIELD_LENGTH + PROPERTY_TYPE_FIELD_LENGTH + NAME_LENGTH_FIELD_LENGTH + PROPERTY_VALUE_LENGTH_FIELD_LENGTH) PROPERTY_UPDATE_HEADER_LENGTH = (PROPERTY_ID_FIELD_LENGTH + PROPERTY_VALUE_LENGTH_FIELD_LENGTH) SENSORLAB_HEADER = FIRST_BYTE NODE_ID_FIELD = SENSORLAB_HEADER EVENT_ID_FIELD = (NODE_ID_FIELD + NODE_ID_FIELD_LENGTH) EVENT_PAYLOAD = (EVENT_ID_FIELD + EVENT_ID_FIELD_LENGTH) NODE_ADD_PAYLOAD = EVENT_PAYLOAD NODE_ADD_PROPERTIES_COUNT_FIELD = NODE_ADD_PAYLOAD NODE_ADD_PROPERTIES = (NODE_ADD_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) NODE_PROPERTY_ADD_PAYLOAD = EVENT_PAYLOAD NODE_PROPERTY_ADD_PROPERTIES_COUNT_FIELD = NODE_PROPERTY_ADD_PAYLOAD NODE_PROPERTY_ADD_PROPERTIES = (NODE_PROPERTY_ADD_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) NODE_PROPERTY_UPDATE_PAYLOAD = EVENT_PAYLOAD NODE_PROPERTY_UPDATE_PROPERTIES_COUNT_FIELD = NODE_PROPERTY_UPDATE_PAYLOAD NODE_PROPERTY_UPDATE_PROPERTIES = (NODE_PROPERTY_UPDATE_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) NODE_REMOVE_PAYLOAD = EVENT_PAYLOAD ENTITY_ADD_PAYLOAD = EVENT_PAYLOAD ENTITY_ADD_ENTITY_ID_FIELD = ENTITY_ADD_PAYLOAD ENTITY_ADD_NAME_LENGTH_FIELD = (ENTITY_ADD_ENTITY_ID_FIELD + ENTITY_ID_FIELD_LENGTH) ENTITY_ADD_PROPERTIES_COUNT_FIELD = (ENTITY_ADD_NAME_LENGTH_FIELD + NAME_LENGTH_FIELD_LENGTH) ENTITY_ADD_NAME_FIELD = (ENTITY_ADD_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) ENTITY_PROPERTY_ADD_PAYLOAD = EVENT_PAYLOAD ENTITY_PROPERTY_ADD_ENTITY_ID_FIELD = ENTITY_PROPERTY_ADD_PAYLOAD ENTITY_PROPERTY_ADD_PROPERTIES_COUNT_FIELD = (ENTITY_PROPERTY_ADD_ENTITY_ID_FIELD + ENTITY_ID_FIELD_LENGTH) ENTITY_PROPERTY_ADD_PROPERTIES = (ENTITY_PROPERTY_ADD_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) ENTITY_PROPERTY_UPDATE_PAYLOAD = EVENT_PAYLOAD ENTITY_PROPERTY_UPDATE_ENTITY_ID_FIELD = ENTITY_PROPERTY_UPDATE_PAYLOAD ENTITY_PROPERTY_UPDATE_PROPERTIES_COUNT_FIELD = (ENTITY_PROPERTY_UPDATE_ENTITY_ID_FIELD + ENTITY_ID_FIELD_LENGTH) ENTITY_PROPERTY_UPDATE_PROPERTIES = (ENTITY_PROPERTY_UPDATE_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) ENTITY_REMOVE_PAYLOAD = EVENT_PAYLOAD ENTITY_REMOVE_ENTITY_ID_FIELD = ENTITY_REMOVE_PAYLOAD LINK_ADD_PAYLOAD = EVENT_PAYLOAD LINK_ADD_ENTITY_ID_FIELD = LINK_ADD_PAYLOAD LINK_ADD_ID_FIELD = (LINK_ADD_ENTITY_ID_FIELD + ENTITY_ID_FIELD_LENGTH) LINK_ADD_SOURCE_PROPERTIES_COUNT_FIELD = (LINK_ADD_ID_FIELD + LINK_ID_FIELD_LENGTH) LINK_ADD_TARGET_PROPERTIES_COUNT_FIELD = (LINK_ADD_SOURCE_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) LINK_ADD_PROPERTIES_COUNT_FIELD = (LINK_ADD_TARGET_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) LINK_ADD_SOURCE_PROPERTIES = (LINK_ADD_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) LINK_PROPERTY_ADD_PAYLOAD = 
EVENT_PAYLOAD LINK_PROPERTY_ADD_ENTITY_ID_FIELD = LINK_PROPERTY_ADD_PAYLOAD LINK_PROPERTY_ADD_ID_FIELD = (LINK_PROPERTY_ADD_ENTITY_ID_FIELD + ENTITY_ID_FIELD_LENGTH) LINK_PROPERTY_ADD_PROPERTIES_COUNT_FIELD = (LINK_PROPERTY_ADD_ID_FIELD + LINK_ID_FIELD_LENGTH) LINK_PROPERTY_ADD_PROPERTIES = (LINK_PROPERTY_ADD_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) LINK_PROPERTY_UPDATE_PAYLOAD = EVENT_PAYLOAD LINK_PROPERTY_UPDATE_ENTITY_ID_FIELD = LINK_PROPERTY_ADD_PAYLOAD LINK_PROPERTY_UPDATE_ID_FIELD = (LINK_PROPERTY_UPDATE_ENTITY_ID_FIELD + ENTITY_ID_FIELD_LENGTH) LINK_PROPERTY_UPDATE_PROPERTIES_COUNT_FIELD = (LINK_PROPERTY_UPDATE_ID_FIELD + LINK_ID_FIELD_LENGTH) LINK_PROPERTY_UPDATE_PROPERTIES = (LINK_PROPERTY_UPDATE_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) LINK_REMOVE_PAYLOAD = EVENT_PAYLOAD LINK_REMOVE_ENTITY_ID_FIELD = LINK_REMOVE_PAYLOAD LINK_REMOVE_ID_FIELD = (LINK_REMOVE_ENTITY_ID_FIELD + ENTITY_ID_FIELD_LENGTH) FRAME_PRODUCE_PAYLOAD = EVENT_PAYLOAD FRAME_PRODUCE_ENTITY_ID_FIELD = FRAME_PRODUCE_PAYLOAD FRAME_PRODUCE_ID_FIELD = (FRAME_PRODUCE_ENTITY_ID_FIELD + ENTITY_ID_FIELD_LENGTH) FRAME_PRODUCE_DATA_LENGTH_FIELD = (FRAME_PRODUCE_ID_FIELD + FRAME_ID_FIELD_LENGTH) FRAME_PRODUCE_PROPERTIES_COUNT_FIELD = (FRAME_PRODUCE_DATA_LENGTH_FIELD + FRAME_DATA_LENGTH_FIELD_LENGTH) FRAME_PRODUCE_DATA_FIELD = (FRAME_PRODUCE_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) FRAME_RX_PAYLOAD = EVENT_PAYLOAD FRAME_RX_ENTITY_ID_FIELD = FRAME_RX_PAYLOAD FRAME_RX_ID_FIELD = (FRAME_RX_ENTITY_ID_FIELD + ENTITY_ID_FIELD_LENGTH) FRAME_RX_DATA_LENGTH_FIELD = (FRAME_RX_ID_FIELD + FRAME_ID_FIELD_LENGTH) FRAME_RX_PROPERTIES_COUNT_FIELD = (FRAME_RX_DATA_LENGTH_FIELD + FRAME_DATA_LENGTH_FIELD_LENGTH) FRAME_RX_DATA_FIELD = (FRAME_RX_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) FRAME_PROPERTY_ADD_PAYLOAD = EVENT_PAYLOAD FRAME_PROPERTY_ADD_ENTITY_ID_FIELD = FRAME_PROPERTY_ADD_PAYLOAD FRAME_PROPERTY_ADD_ID_FIELD = (FRAME_PROPERTY_ADD_ENTITY_ID_FIELD + ENTITY_ID_FIELD_LENGTH) FRAME_PROPERTY_ADD_PROPERTIES_COUNT_FIELD = (FRAME_PROPERTY_ADD_ID_FIELD + FRAME_ID_FIELD_LENGTH) FRAME_PROPERTY_ADD_PROPERTIES = (FRAME_PROPERTY_ADD_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) FRAME_PROPERTY_UPDATE_PAYLOAD = EVENT_PAYLOAD FRAME_PROPERTY_UPDATE_ENTITY_ID_FIELD = FRAME_PROPERTY_UPDATE_PAYLOAD FRAME_PROPERTY_UPDATE_ID_FIELD = (FRAME_PROPERTY_UPDATE_ENTITY_ID_FIELD + ENTITY_ID_FIELD_LENGTH) FRAME_PROPERTY_UPDATE_PROPERTIES_COUNT_FIELD = (FRAME_PROPERTY_UPDATE_ID_FIELD + FRAME_ID_FIELD_LENGTH) FRAME_PROPERTY_UPDATE_PROPERTIES = (FRAME_PROPERTY_UPDATE_PROPERTIES_COUNT_FIELD + PROPERTIES_COUNT_FIELD_LENGTH) FRAME_DATA_UPDATE_PAYLOAD = EVENT_PAYLOAD FRAME_DATA_UPDATE_ENTITY_ID_FIELD = FRAME_DATA_UPDATE_PAYLOAD FRAME_DATA_UPDATE_ID_FIELD = (FRAME_DATA_UPDATE_ENTITY_ID_FIELD + ENTITY_ID_FIELD_LENGTH) FRAME_DATA_UPDATE_DATA_LENGTH_FIELD = (FRAME_DATA_UPDATE_ID_FIELD + FRAME_ID_FIELD_LENGTH) FRAME_DATA_UPDATE_DATA_FIELD = (FRAME_DATA_UPDATE_DATA_LENGTH_FIELD + FRAME_DATA_LENGTH_FIELD_LENGTH) FRAME_TX_PAYLOAD = E
VENT_PAYLOAD FRAME_TX_ENTITY_ID_FIELD = FRAME_TX_PAYLOAD FRAME_TX_ID_FIELD = (FRAME_TX_ENTITY_ID_FIELD + ENTITY_ID_FIELD_LENGTH) FRAME_TX_DATA_LENGTH_FIELD = (FRAME_TX_ID_FIELD + FRAME_ID_FIELD_LENGTH) FRAME_TX_DATA_FIELD = (FRAME_TX_DATA_LENGTH_FIELD + FRAME_DATA_LENGTH_FIELD_LENGTH) FRAME_CONSUME_PAYLOAD = EVENT_PAYLOAD FRAME_CONSUME_ENT
ITY_ID_FIELD = FRAME_CONSUME_PAYLOAD FRAME_CONSUME_ID_FIELD = (FRAME_CONSUME_ENTITY_ID_FIELD + ENTITY_ID_FIELD_LENGTH) FRAME_CONSUME_DATA_LENGTH_FIELD = (FRAME_CONSUME_ID_FIELD + FRAME_ID_FIELD_LENGTH) FRAME_CONSUME_DATA_FIELD = (FRAME_CONSUME_DATA_LENGTH_FIELD + FRAME_DATA_LENGTH_FIELD_LENGTH) PROPERTY_DECLARATION_ID_FIELD = FIRST_BYTE PROPERTY_DECLARATION_UNIT_PREFIX_FIELD = (PROPERTY_DECLARATION_ID_FIELD + PROPERTY_ID_FIELD_LENGTH) PROPERTY_DECLARATION_UNIT_FIELD = (PROPERTY_DECLARATION_UNIT_PREFIX_FIELD + PROPERTY_UNIT_PREFIX_FIELD_LENGTH) PROPERTY_DECLARATION_TYPE_FIELD = (PROPERTY_DECLARATION_UNIT_FIELD + PROPERTY_UNIT_FIELD_LENGTH) PROPERTY_DECLARATION_NAME_LENGTH_FIELD = (PROPERTY_DECLARATION_TYPE_FIELD + PROPERTY_TYPE_FIELD_LENGTH) PROPERTY_DECLARATION_VALUE_LENGTH_FIELD = (PROPERTY_DECLARATION_NAME_LENGTH_FIELD + NAME_LENGTH_FIELD_LENGTH) PROPERTY_UPDATE_ID_FIELD = FIRST_BYTE PROPERTY_UPDATE_VALUE_LENGTH_FIELD = (PROPERTY_UPDATE_ID_FIELD + PROPERTY_ID_FIELD_LENGTH) EVENT_NODE_ADD = 0x00 EVENT_NODE_PROPERTY_ADD = 0x01 EVENT_NODE_PROPERTY_UPDATE = 0x02 EVENT_NODE_REMOVE = 0x03 EVENT_ENTITY_ADD = 0x10 EVENT_ENTITY_PROPERTY_
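Since everything above is byte offsets and field lengths, decoding a frame header is a struct.unpack_from away. A minimal sketch (not from the module; big-endian byte order is an assumption, as the generated constants do not record it):

import struct

def parse_sensorlab_header(frame):
    # NODE_ID_FIELD_LENGTH == 4 -> '!I', EVENT_ID_FIELD_LENGTH == 1 -> '!B'
    node_id = struct.unpack_from('!I', frame, NODE_ID_FIELD)[0]
    event_id = struct.unpack_from('!B', frame, EVENT_ID_FIELD)[0]
    return node_id, event_id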
ScienceWorldCA/domelights
backend/artnet-bridge/artnet/scripts/alternating_color_fades.py
Python
apache-2.0
776
0.03866
import time, logging

from artnet import dmx, fixtures, rig
from artnet.dmx import fades

log = logging.getLogger(__name__)

# set up test fixtures
r = rig.get_default_rig()
g = r.groups['all']


def all_red():
    """
    Create an all-red frame.
    """
    g.setColor('#ff0000')
    g.setIntensity(255)
    return g.getFrame()


def all_blue():
    """
    Create an all-blue frame.
    """
    g.setColor('#0000ff')
    g.setIntensity(255)
    return g.getFrame()


def main(config, controller=None):
    log.info("Running script %s" % __name__)
    # global g
    # g = get_default_fixture_group(config)
    q = controller or dmx.Controller(config.get('base', 'address'), bpm=60, nodaemon=True, runout=True)
    q.add(fades.create_multifade([
        all_red(),
        all_blue(),
    ] * 3, secs=5.0))
    if not controller:
        q.start()
Ganben/solverify
analyze/ethereum_data1.py
Python
gpl-3.0
508
0.021654
#encoding=utf-8
# this is the interface to create your own data source
# this class pings a private / public blockchain to get the balance and code information

from web3 import Web3, KeepAliveRPCProvider


class EthereumData:
    def __init__(self):
        self.host = 'x.x.x.x'
        self.port = '8545'
        self.web3 = Web3(KeepAliveRPCProvider(host=self.host, port=self.port))

    def getBalance(self, address):
        return self.web3.eth.getBalance(address)

    def getCode(self, address):
        return self.web3.eth.getCode(address)
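A usage sketch (the zero address below is just a placeholder, and the 'x.x.x.x' host must point at a reachable RPC node before this runs):

source = EthereumData()
addr = '0x0000000000000000000000000000000000000000'
print(source.getBalance(addr))  # balance in wei
print(source.getCode(addr))     # contract bytecode; plain accounts have none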
google/jax
jax/_src/lax/windowed_reductions.py
Python
apache-2.0
42,865
0.007652
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial from typing import (Any, Callable, Optional, Sequence, Union, Tuple) import warnings import numpy as np from jax.interpreters import ad from jax.interpreters import batching from jax.interpreters import mlir from jax.interpreters import xla from jax import core from jax.core import (ShapedArray, ConcreteArray) from jax import tree_util from jax._src import ad_util from jax._src import dtypes import jax._src.lax.lax as lax import jax._src.lax.convolution as convolution import jax._src.lax.slicing as slicing from jax._src.lib.mlir import ir from jax._src.lib.mlir.dialects import mhlo from jax._src.lib import xla_bridge from jax._src.lib import xla_client import jax._src.util as util map = util.safe_map zip = util.safe_zip xb = xla_bridge xc = xla_client xops = xla_client.ops Array = Any def reduce_window(operand, init_value, computation: Callable, window_dimensions: core.Shape, window_strides: Sequence[int], padding: Union[str, Sequence[Tuple[int, int]]], base_dilation: Optional[Sequence[int]] = None, window_dilation: Optional[Sequence[int]] = None) -> Array: """Wraps XLA's `ReduceWindowWithGeneralPadding <https://www.tensorflow.org/xla/operation_semantics#reducewindow>`_ operator. """ flat_operands, operand_tree = tree_util.tree_flatten(operand) flat_init_values, init_value_tree = tree_util.tree_flatten(init_value) if operand_tree != init_value_tree: raise ValueError('Operands must have the same tree structure as ' f'init_values: {operand_tree} vs. {init_value_tree}') if len(flat_operands) == 0: raise ValueError('reduce_window must have at least one operand.') if len(flat_operands) != len(flat_init_values): raise ValueError('Must have same total number of operands as init_values: ' f' {len(flat_operands)} vs. {len(flat_init_values)}') if isinstance(padding, str): dilated_window_dims = ( window_dimensions if window_dilation is None else lax._dilate_shape(window_dimensions, window_dilation)) padding = tuple(lax.padtype_to_pads( flat_operands[0].shape, dilated_window_dims, window_strides, padding)) else: padding = tuple(padding) if base_dilation is None: base_dilation = (1,) * len(window_dimensions) if window_dilation is None: window_dilation = (1,) * len(window_dimensions) monoid_reducer = _get_monoid_window_reducer(computation, flat_init_values) if monoid_reducer: return monoid_reducer(operand, window_dimensions, window_strides, padding, base_dilation, window_dilation) else: flat_init_avals = map(lax._abstractify, f
lat_init_values) jaxpr, consts, out_tree = lax._variadic_reduction_jaxpr( computation, tuple(flat_init_avals), init_value_tree) if operand_tree != out_tree: rais
e ValueError( 'reduce_window output must have the same tree structure as the operands' f' {operand_tree} vs. {out_tree}') out_flat = reduce_window_p.bind( *(flat_operands + flat_init_values), jaxpr=jaxpr, consts=consts, window_dimensions=tuple(window_dimensions), window_strides=tuple(window_strides), padding=padding, base_dilation=tuple(base_dilation), window_dilation=tuple(window_dilation)) return tree_util.tree_unflatten(out_tree, out_flat) def _get_monoid_window_reducer(monoid_op: Callable, xs: Sequence[Array]) -> Optional[Callable]: if len(xs) != 1: return None x, = xs aval = core.get_aval(x) if (type(aval) is ConcreteArray) and aval.shape == (): if monoid_op is lax.add: return aval.val == 0 and _reduce_window_sum elif monoid_op is lax.max: return (aval.val == lax._get_max_identity(aval.dtype) and _reduce_window_max) elif monoid_op is lax.min: return (aval.val == lax._get_min_identity(aval.dtype) and _reduce_window_min) return None def _reduce_window_sum(operand: Array, window_dimensions: core.Shape, window_strides: Sequence[int], padding: Sequence[Tuple[int, int]], base_dilation: Optional[Sequence[int]] = None, window_dilation: Optional[Sequence[int]] = None) -> Array: if base_dilation is None: base_dilation = (1,) * len(window_dimensions) if window_dilation is None: window_dilation = (1,) * len(window_dimensions) return reduce_window_sum_p.bind( operand, window_dimensions=tuple(window_dimensions), window_strides=tuple(window_strides), padding=tuple(padding), base_dilation=tuple(base_dilation), window_dilation=tuple(window_dilation)) def _reduce_window_prod(operand: Array, window_dimensions: core.Shape, window_strides: Sequence[int], padding: Sequence[Tuple[int, int]], base_dilation: Optional[Sequence[int]] = None, window_dilation: Optional[Sequence[int]] = None) -> Array: init_value = lax._const(operand, 1) jaxpr, consts = lax._reduction_jaxpr(lax.mul, lax._abstractify(init_value)) if base_dilation is None: base_dilation = (1,) * len(window_dimensions) if window_dilation is None: window_dilation = (1,) * len(window_dimensions) out, = reduce_window_p.bind( operand, init_value, jaxpr=jaxpr, consts=consts, window_dimensions=tuple(window_dimensions), window_strides=tuple(window_strides), padding=tuple(padding), base_dilation=tuple(base_dilation), window_dilation=tuple(window_dilation)) return out def _reduce_window_max(operand: Array, window_dimensions: core.Shape, window_strides: Sequence[int], padding: Sequence[Tuple[int, int]], base_dilation: Optional[Sequence[int]] = None, window_dilation: Optional[Sequence[int]] = None) -> Array: if base_dilation is None: base_dilation = (1,) * len(window_dimensions) if window_dilation is None: window_dilation = (1,) * len(window_dimensions) return reduce_window_max_p.bind( operand, window_dimensions=tuple(window_dimensions), window_strides=tuple(window_strides), padding=tuple(padding), base_dilation=tuple(base_dilation), window_dilation=tuple(window_dilation)) def _reduce_window_min(operand: Array, window_dimensions: core.Shape, window_strides: Sequence[int], padding: Sequence[Tuple[int, int]], base_dilation: Optional[Sequence[int]] = None, window_dilation: Optional[Sequence[int]] = None) -> Array: if base_dilation is None: base_dilation = (1,) * len(window_dimensions) if window_dilation is None: window_dilation = (1,) * len(window_dimensions) return reduce_window_min_p.bind( operand, window_dimensions=tuple(window_dimensions), window_strides=tuple(window_strides), padding=tuple(padding), base_dilation=tuple(base_dilation), 
window_dilation=tuple(window_dilation)) def _select_and_scatter(operand: Array, select: Callable, window_dimensions: core.Shape, window_strides: Sequence[int], padding: Sequence[Tuple[int, int]], source: Array, init_value: Array, scatter: Callable) -> Array: select_jaxpr, select_consts = lax._reduction_jaxpr( select, lax._abstractify(init_value))
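An illustration of the wrapper above (not from this file; jax.lax re-exports reduce_window): a sliding-window sum over a 1-D array, which hits the monoid fast path for lax.add with a zero init value.

import jax.numpy as jnp
from jax import lax

x = jnp.arange(6, dtype=jnp.float32)  # [0. 1. 2. 3. 4. 5.]
y = lax.reduce_window(x, 0., lax.add,
                      window_dimensions=(2,), window_strides=(1,),
                      padding='VALID')
print(y)  # [1. 3. 5. 7. 9.]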
spradeepv/dive-into-python
hackerrank/domain/python/sets/intersection.py
Python
mit
1,336
0.005988
""" Task Students of District College have subscription of English and French newspapers. Some students have subscribed to only English, some have subscribed to only French and some have subscribed to both newspapers. You are given two sets of roll numbers of students, who have subscribed to English and French newspapers. Your task is to find total number of students who have subscribed to both newspapers. Input Format First line contains, number of students who have subscribed to English newspaper. Second line contains, space separated list of roll numbers of students, who have subscribed to English newspaper. Third line contains, number of students who have subscribed to French newspaper. Fourth line contains, space separated list of roll numbers of students, who have subscribed to French newspaper. Constraints 0<Total nu
mber of students in college<1000 Output Format Output total number of students who have su
bscriptions in both English and French. Sample Input 9 1 2 3 4 5 6 7 8 9 9 10 1 2 3 11 21 55 6 8 Sample Output 5 Explanation Roll numbers of students who have both subscriptions: 1, 2, 3, 6 and 8. Hence, total is 5 students. """ n1 = int(raw_input()) english = set(map(int, raw_input().split())) n2 = int(raw_input()) french = set(map(int, raw_input().split())) print len(english.intersection(french))
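A side note, not from the original solution: set intersection also has an operator form, so the final line is equivalent to

print len(english & french)  # Python 2, matching the script above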
aalhour/PyCOOLC
pycoolc/semanalyser.py
Python
mit
18,923
0.003701
#!/usr/bin/env python3 # ----------------------------------------------------------------------------- # semanalyser.py # # Author: Ahmad Alhour (aalhour.com). # Date: TODO # Description: The Semantic Analyser module. Implements Semantic Analysis and # Type Checking. # ----------------------------------------------------------------------------- """ # Semantic Analysis ## Checks 1. All identifiers are declared. 2. Types. 3. Inheritance relationships. 4. Classes defined only once. 5. Methods in a class defined only once. 6. Reserved identifiers are not misused. ## Scope ### Identifier Bindings: Cool Identifier Bindings are introduced by: * Class declarations (introduce class names) * Method definitions (introduce method names) – Let expressions (introduce object id’s) * Formal parameters (introduce object id’s) * Attribute definitions (introduce object id’s) * Case expressions (introduce object id’s) ### Class Definitions: * Cannot be nested. * Are globally visible throughout the program. * Class names can be used before they are defined. ### Class Attributes: * Attribute names are global within the class in which they are defined ### Class Methods: * Method names have complex rules. * A method need not be defined in the class in which it is used, but in some parent class. * Methods may also be redefined (overridden). ## Type System ### Type Operations: * Type Checking. The process of verifying fully typed programs * Type Inference. The process of filling in missing type information ### Types in Cool: 1. Class names: Builtins (Int; String; Bool; Object; IO) and User Defined. 2. SELF_TYPE. ### Sub-Typing: * Types can be thought of as sets of attributes and operations defined on these sets. * All types are subtypes of the `Object` type. * Types can inherit from other types other than the `Object` type. * No type is allowed to inherit from the following types only: `Int`, `Bool`, `String` and `SELF_TYPE`. * All type relations can be thought of as a tree where `Object` is at the root and all other types branching down from it, this is also called the `inheritance tree`. * A least upper bound (`lub`) relation of two types is their least common ancestor in the inheritance tree. * Subclasses only add attributes or methods. * Methods can be redefined but with same type. * All operations that can be used on type `C` can also be used on type `C'`, where `C'` <= `C`, meaning `C'` is a subtype of `C`. ### Typing Methods: * Method and Object identifiers live in different name spaces. + A method `foo` and an object `foo` can coexist in the same scope. * Logically, Cool Type Checking needs the following 2 Type Environments: + `O`: a function providing mapping from types to Object Identifiers and vice versa. + `M`: a function providing mapping from types to Method Names and vice versa. * Due to `SELF_TYPE`, we need to know the class name at all points of Type Checking methods. + `C`: a function providing the name of the current class (Type). ### SELF_TYPE: `SELF_TYPE` is not a Dynamic Type, it is a Static Type. `SELF_TYPE` is the type of the `self` parameter in an instance. In a method dispatch, `SELF_TYPE` might be a subtype of the class in which the subject method appears. #### Usage: * `SELF_TYPE` can be used with `new T` expressions. * `SELF_TYPE` can be used as the return type of class methods. * `SELF_TYPE` can be used as the type of expressions (i.e. let expressions: `let x : T in expr`). * `SELF_TYPE` can be used as the type of the actual arguments in a method dispatch. 
* `SELF_TYPE` can **not** be used as the type of c
lass attributes. * `SELF_TYPE` can **not** be used with Static Dispatch (i.e. `T` i
n `m@T(expr1,...,exprN)`). * `SELF_TYPE` can **not** be used as the type of Formal Parameters. #### Least-Upper Bound Relations: * `lub(SELF_TYPE.c, SELF_TYPE.c) = SELF_TYPE.c`. * `lub(SELF_TYPE.c, T) = lub(C, T)`. * `lub(T, SELF_TYPE.c) = lub(C, T)`. ## Semantic Analysis Passes **[incomplete]** 1. Gather all class names. 2. Gather all identifier names. 3. Ensure no undeclared identifier is referenced. 4. Ensure no undeclared class is referenced. 3. Ensure all Scope Rules are satisfied (see: above). 4. Compute Types in a bottom-up pass over the AST. ## Error Recovery Two solutions: 1. Assign the type `Object` to ill-typed expressions. 2. Introduce a new type called `No_Type` for use with ill-typed expressions. Solution 1 is easy to implement and will enforce the type inheritance and class hierarchy tree structures. Solution 2 will introduce further adjustments. First, every operation will be treated as defined for `No_Type`. Second, the inheritance tree and class hierarchy will change from being Trees to Graphs. The reason for that is that expressions will ultimately either be of type `Object` or `No_Type`, which will make the whole representation look like a graph with two roots. """ from logging import info, debug, warning, critical from collections import defaultdict from typing import Dict, Set, AnyStr, Tuple import pycoolc.ast as AST # ----------------------------------------------------------------------------- # # GLOBALS AND CONSTANTS # # ----------------------------------------------------------------------------- # Un-boxed Primitive Value Type UNBOXED_PRIMITIVE_VALUE_TYPE = "__prim_slot" IO_CLASS = "IO" OBJECT_CLASS = "Object" INTEGER_CLASS = "Int" BOOLEAN_CLASS = "Bool" STRING_CLASS = "String" # ----------------------------------------------------------------------------- # # HELPERS: Exceptions, Symbol Tables and Setup Methods # # ----------------------------------------------------------------------------- class SemanticAnalysisError(Exception): pass class SemanticAnalysisWarning(Warning): pass # ----------------------------------------------------------------------------- # # MAIN SEMANTIC ANALYSER API CLASS # # ----------------------------------------------------------------------------- class PyCoolSemanticAnalyser(object): def __init__(self): """ TODO :param program_ast: TODO :return: None """ super(PyCoolSemanticAnalyser, self).__init__() # Initialize the internal program ast instance. self._program_ast = None # Classes Map: maps each class name (key: String) to its class instance (value: AST.Class). # Dict[AnyStr, AST.Class] self._classes_map = dict() # Class Inheritance Graph: maps a parent class (key: String) to a unique collection of its # children classes (value: set). 
# Dict[AnyStr, Set] self._inheritance_graph = defaultdict(set) # ######################################################################### # PUBLIC # # ######################################################################### def transform(self, program_ast: AST.Program) -> AST.Program: """ TODO :param program_ast: TODO :return: TODO """ if program_ast is None: raise ValueError("Program AST object cannot be None!") elif not isinstance(program_ast, AST.Program): raise TypeError("Program AST object is not of type \"AST.Program\"!") self._init_collections(program_ast) # Run some passes self._default_undefined_parent_classes_to_object() self._invalidate_inheritance_from_builtin_classes() self._check_cyclic_inheritance_relations() return self._program_ast # ######################################################################### # PRIVATE # # ######################################################################### def _init_collections(self, program_ast: AST.Program) -> None: """ TODO :param program_ast: TODO :ret
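The least-upper-bound relation described in the docstring is simple to prototype. A minimal sketch (not part of the analyser; `parents` is a hypothetical class-name to parent-name map, and both arguments are assumed to live in the one inheritance tree rooted at Object):

def lub(parents, t1, t2):
    # collect t1's ancestor chain, then climb from t2 until we meet it
    ancestors = set()
    node = t1
    while node is not None:
        ancestors.add(node)
        node = parents.get(node)
    node = t2
    while node not in ancestors:
        node = parents[node]
    return node

parents = {'Object': None, 'Int': 'Object', 'Bool': 'Object', 'A': 'Object', 'B': 'A'}
assert lub(parents, 'B', 'Int') == 'Object'
assert lub(parents, 'B', 'A') == 'A'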
miniconfig/home-assistant
homeassistant/components/zwave/util.py
Python
mit
3,178
0
"""Zwave util methods.""" import logging from . import const _LOGGER = logging.getLogger(__name__) def check_node_schema(node, schema): """Check if node matches the passed node schema.""" if (const.DISC_NODE_ID in schema and node.node_id not in schema[const.DISC_NODE_ID]): _LOGGER.debug("node.node_id %s not in node_id %s", node.node_id, schema[const.DISC_NODE_ID]) return False if (const.DISC_GENERIC_DEVICE_CLASS in schema and node.generic not in schema[const.DISC_GENERIC_DEVICE_CLASS]): _LOGGER.debug("node.generic %s not in generic_device_class %s", node.generic, schema[const.DISC_GENERIC_DEVICE_CLASS]) return False if (const.DISC_SPECIFIC_DEVICE_CLASS in schema and node.specific not in schema[const.DISC_SPECIFIC_DEVICE_CLASS]): _LOGGER.debug("node.specific %s not in specific_device_class %s", node.specific, schema[const.DISC_SPECIFIC_DEVICE_CLASS]) return False return True def check_value_schema(value, schema): """Check if the value matches th
e passed value schema.""" if (const.DISC_COMMAND_CLASS in schema and value.command_class n
ot in schema[const.DISC_COMMAND_CLASS]): _LOGGER.debug("value.command_class %s not in command_class %s", value.command_class, schema[const.DISC_COMMAND_CLASS]) return False if (const.DISC_TYPE in schema and value.type not in schema[const.DISC_TYPE]): _LOGGER.debug("value.type %s not in type %s", value.type, schema[const.DISC_TYPE]) return False if (const.DISC_GENRE in schema and value.genre not in schema[const.DISC_GENRE]): _LOGGER.debug("value.genre %s not in genre %s", value.genre, schema[const.DISC_GENRE]) return False if (const.DISC_READONLY in schema and value.is_read_only is not schema[const.DISC_READONLY]): _LOGGER.debug("value.is_read_only %s not %s", value.is_read_only, schema[const.DISC_READONLY]) return False if (const.DISC_WRITEONLY in schema and value.is_write_only is not schema[const.DISC_WRITEONLY]): _LOGGER.debug("value.is_write_only %s not %s", value.is_write_only, schema[const.DISC_WRITEONLY]) return False if (const.DISC_LABEL in schema and value.label not in schema[const.DISC_LABEL]): _LOGGER.debug("value.label %s not in label %s", value.label, schema[const.DISC_LABEL]) return False if (const.DISC_INDEX in schema and value.index not in schema[const.DISC_INDEX]): _LOGGER.debug("value.index %s not in index %s", value.index, schema[const.DISC_INDEX]) return False if (const.DISC_INSTANCE in schema and value.instance not in schema[const.DISC_INSTANCE]): _LOGGER.debug("value.instance %s not in instance %s", value.instance, schema[const.DISC_INSTANCE]) return False return True
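A hypothetical illustration (not from Home Assistant) of how a discovery schema gates a value; SimpleNamespace stands in for a real openzwave value object:

from types import SimpleNamespace

fake_value = SimpleNamespace(command_class=37, type='Bool', genre='user',
                             is_read_only=False, is_write_only=False,
                             label='Switch', index=0, instance=1)
schema = {const.DISC_COMMAND_CLASS: [37], const.DISC_GENRE: ['user']}
print(check_value_schema(fake_value, schema))  # True: both constraints match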
joberreiter/pyload
module/plugins/crypter/XFileSharingProFolder.py
Python
gpl-3.0
2,857
0.011551
# -*- coding: utf-8 -*-

import re

from module.plugins.internal.XFSCrypter import XFSCrypter, create_getInfo


class XFileSharingProFolder(XFSCrypter):
    __name__    = "XFileSharingProFolder"
    __type__    = "crypter"
    __version__ = "0.14"
    __status__  = "testing"

    __pattern__ = r'https?://(?:www\.)?(?:\w+\.)*?(?P<DOMAIN>(?:[\d.]+|[\w\-^_]{3,}(?:\.[a-zA-Z]{2,}){1,2})(?:\:\d+)?)/(?:user|folder)s?/\w+'
    __config__  = [("use_subfolder"     , "bool", "Save package to subfolder"          , True),
                   ("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]

    __description__ = """XFileSharingPro dummy folder decrypter plugin for hook"""
    __license__     = "GPLv3"
    __authors__     = [("Walter Purcaro", "vuolter@gmail.com")]


    def _log(self, level, plugintype, pluginname, messages):
        return super(XFileSharingProFolder, self)._log(level,
                                                       plugintype,
                                                       "%s: %s" % (pluginname, self.PLUGIN_NAME),
                                                       messages)


    def init(self):
        super(XFileSharingProFolder, self).init()

        self.__pattern__ = self.pyload.pluginManager.crypterPlugins[self.__name__]['pattern']

        self.PLUGIN_DOMAIN = re.match(self.__pattern__, self.pyfile.url).group("DOMAIN").lower()
        self.PLUGIN_NAME   = "".join(part.capitalize() for part in re.split(r'(\.|\d+|-)', self.PLUGIN_DOMAIN) if part != '.')


    def _setup(self):
        account_name = self.__name__ if self.account.PLUGIN_DOMAIN is None else self.PLUGIN_NAME
        self.chunk_limit = 1
        self.multiDL = True

        if self.account:
            self.req = self.pyload.requestFactory.getRequest(account_name, self.account.user)
            self.premium = self.account.premium
            self.resume_download = self.premium
        else:
            self.req = self.pyload.requestFactory.getRequest(account_name)
            self.premium = False
            self.resume_download = False


    def load_account(self):
        if self.req:
            self.req.close()

        if not self.account:
            self.account = self.pyload.accountManager.getAccountPlugin(self.PLUGIN_NAME)

        if not self.account:
            self.account = self.pyload.accountManager.getAccountPlugin(self.__name__)

        if self.account:
            if not self.account.PLUGIN_DOMAIN:
                self.account.PLUGIN_DOMAIN = self.PLUGIN_DOMAIN

            if not self.account.user:  #@TODO: Move to `Account` in 0.4.10
                self.account.user = self.account.select()[0]

            if not self.account.logged:
                self.account = False


getInfo = create_getInfo(XFileSharingProFolder)
gotostack/swift
test/probe/test_object_handoff.py
Python
apache-2.0
9,005
0
#!/usr/bin/python -u # Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from subprocess import call, Popen from unittest import main, TestCase from uuid import uuid4 from swiftclient import client from swift.common import direct_client from swift.common.exceptions import ClientException from test.probe.common import kill_server, kill_servers, reset_environment, \ start_server class TestObjectHandoff(TestCase): def setUp(self): (self.pids, self.port2server, self.account_ring, self.container_ring, self.object_ring, self.url, self.token, self.account, self.configs) = reset_environment() def tearDown(self): kill_servers(self.port2server, self.pids) def test_main(self): # Create container # Kill one container/obj primary server # Create container/obj (goes to two primary servers and one handoff) # Kill other two container/obj primary servers # Indirectly through proxy assert we can get container/obj # Restart those other two container/obj primary servers # Directly to handoff server assert we can get container/obj # Assert container listing (via proxy and directly) has container/obj # Bring the first container/obj primary server back up # Assert that it doesn't have container/obj yet # Run object replication, ensuring we run the handoff node last so it # should remove its extra handoff partition # Assert the first container/obj primary server now has container/obj # Assert the handoff server no longer has container/obj # Kill the first container/obj primary server again (we have two # primaries and the handoff up now) # Delete container/obj # Assert we can't head container/obj # Assert container/obj is not in the container listing, both indirectly # and directly # Restart the first container/obj primary server again # Assert it still has container/obj # Run object replication, ensuring we run the handoff node last so it # should remove its extra handoff partition # Assert primary node no longer has container/obj container = 'container-%s' % uuid4() client.put_container(self.url, self.token, container) cpart, cnodes = self.container_ring.get_nodes(self.account, container) cnode = cnodes[0] obj = 'object-%s' % uuid4() opart, onodes = self.object_ring.get_nodes( self.account, container, obj) onode = onodes[0] kill_server(onode['port'], self.port2server, self.pids) client.put_object(self.url, self.token, container, obj, 'VERIFY') odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) # Kill all primaries to ensure GET handoff works for node in onodes[1:]: kill_server(node['port'], self.port2server, self.pids) odata = client.get_object(self.url, self.token, container, obj)[-1] if odata != 'VERIFY': raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) for node in onodes[1:]: start_server(node['port'], self.port2server, self.pids) # We've indirectly verified the handoff node has the object, but let's # 
directly verify it. another_onode = self.object_ring.get_more_nodes(opart).next() odata = direct_client.direct_get_object( another_onode, opart, self.account, container, obj)[-1] if odata != 'VERIFY': raise Exception('Direct object GET did not return VERIFY, instead ' 'it returned: %s' % repr(odata)) objs = [o['name'] for o in client.get_container(self.url, self.token, container)[1]] if obj not in objs: raise Exception('Container listing did not know about object') for cnode in cnodes: objs = [o['name'] for o in direct_client.direct_get_container( cnode, cpart, self.acco
unt, container)[1]] if obj not in objs: raise Exception( 'Container server %s:%s did not know about object' % (cnode['ip'], cnode['port'])) start_server(onode['port'], self.port2server, self.pids) exc = None try: direct_client.direct_get_object(onode, opart, sel
f.account, container, obj) except ClientException as err: exc = err self.assertEquals(exc.http_status, 404) # Run the extra server last so it'll remove its extra partition processes = [] for node in onodes: try: port_num = node['replication_port'] except KeyError: port_num = node['port'] processes.append(Popen(['swift-object-replicator', self.configs['object-replicator'] % ((port_num - 6000) / 10), 'once'])) for process in processes: process.wait() try: another_port_num = another_onode['replication_port'] except KeyError: another_port_num = another_onode['port'] call(['swift-object-replicator', self.configs['object-replicator'] % ((another_port_num - 6000) / 10), 'once']) odata = direct_client.direct_get_object(onode, opart, self.account, container, obj)[-1] if odata != 'VERIFY': raise Exception('Direct object GET did not return VERIFY, instead ' 'it returned: %s' % repr(odata)) exc = None try: direct_client.direct_get_object(another_onode, opart, self.account, container, obj) except ClientException as err: exc = err self.assertEquals(exc.http_status, 404) kill_server(onode['port'], self.port2server, self.pids) client.delete_object(self.url, self.token, container, obj) exc = None try: client.head_object(self.url, self.token, container, obj) except client.ClientException as err: exc = err self.assertEquals(exc.http_status, 404) objs = [o['name'] for o in client.get_container(self.url, self.token, container)[1]] if obj in objs: raise Exception('Container listing still knew about object') for cnode in cnodes: objs = [o['name'] for o in direct_client.direct_get_container( cnode, cpart, self.account, container)[1]] if obj in objs: raise Exception( 'Container server %s:%s still knew about object' % (cnode['ip'], cnode['port'])) start_server(onode['port'], self.port2server, self.pids) direct_client.direct_get_object(onode, opart, self.account, container, obj) # Run the extra server last so it'll remove its extra partition processes = [] for node in onodes: try: port_num = node['replication_port'] excep
mbareta/edx-platform-ft
lms/djangoapps/ccx/migrations/0018_auto_20170721_0611.py
Python
agpl-3.0
472
0.002119
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import datetime


class Migration(migrations.Migration):

    dependencies = [
        ('ccx', '0017_auto_20170721_0437'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customcourseforedx',
            name='time',
            field=models.DateTimeField(default=datetime.datetime(2017, 7, 21, 6, 10, 51, 471098)),
        ),
    ]
joelsmith/openshift-tools
ansible/roles/lib_git/library/git_rebase.py
Python
apache-2.0
15,173
0.002504
#!/usr/bin/env python # ___ ___ _ _ ___ ___ _ _____ ___ ___ # / __| __| \| | __| _ \ /_\_ _| __| \ # | (_ | _|| .` | _|| / / _ \| | | _|| |) | # \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ # | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _| # | |) | (_) | | .` | (_) || | | _|| |) | | | | # |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_| """Run an ssh agent and set SSH_AUTH_SOCK so that clients will use it Example: with ssh_agent.SshAgent() as agent: agent.add_key(private_key_string) # do ssh stuff # as agent loses scope, the ssh agent is killed """ from __future__ import with_statement import atexit import tempfile import os import sys import shutil import subprocess import random import time import datetime class SshAgentException(Exception): """An exception thrown for problems in SshAgent """ def __init__(self, message): # Call the base class constructor with the parameters it needs super(SshAgentException, self).__init__(message) class SshAgent(object): """Run an ssh agent and set SSH_AUTH_SOCK so that clients will use it. The running agent can have one or more keys added (via the SshAgent.add_key() method or via any other method that can find and talk to the running agent. """ class Cleanup(object): """A helper functor class for SshAgent An object of this class can be passed directly to atexit, which will call __call__() when the program exits """ def __init__(self, ssh_agent, ssh_auth_sock_dir): self.ssh_agent = ssh_agent self.ssh_auth_sock_dir = ssh_auth_sock_dir self.cleaned_up = False self.original_env_var = os.environ.get('SSH_AUTH_SOCK') def __call__(self): if self.cleaned_up: return self.cleaned_up = True try: shutil.rmtree(self.ssh_auth_sock_dir, ignore_errors=True) except OSError: pass try: self.ssh_agent.kill() except OSError: pass if self.original_env_var: os.environ['SSH_AUTH_SOCK'] = self.original_env_var else: del os.environ['SSH_AUTH_SOCK'] def pass_(self): """A function to appease pylint""" pass def pass__(self): """Another function to appease pylint""" self.pass_() def __init__(self): devnull = open(os.devnull, 'w') # Start an ssh-agent process and register it to be killed atexit self.ssh_auth_sock_dir = tempfile.mkdtemp(prefix=os.path.basename(sys.argv[0]) + '.') self.ssh_auth_sock = os.path.join(self.ssh_auth_sock_dir, "ssh_agent") self.ssh_agent = subprocess.Popen(["ssh-agent", "-d", "-a", self.ssh_auth_sock], stdout=devnull, stderr=devnull) self.cleanup = self.Cleanup(self.ssh_agent, self.ssh_auth_sock_dir) # this is here so that when python exits, we make sure that the agent is killed # (in case python exits before our __del__() is called atexit.register(self.cleanup) os.environ["SSH_AUTH_SOCK"] = self.ssh_auth_sock def __enter__(self): return self def __exit__(self, exc_type, exc_value, tback): self.cleanup() def __del__(self): self.cleanup() def kill(self): '''Explicitly kill the running ssh-agent It's not necessary to call this function as the agent will be cleaned up automatically. ''' self.cleanup() def add_key(self, key): """Add a key to the running agent. Note: This function can be called any number of times to add multiple keys. Args: key (str): A string containing the ssh private key to be added (the actual key data, not the filename of a key) Raises: SshAgentException: when ssh-add does not immediately return (as in the case of a private key with a passphrase) """ #if self.ssh_agent.poll() is None: # raise SshAgentException("Unable to add ssh key. Did agent die?") named_pipe_path = os.path.join(self.ssh_auth_sock_dir, "keypipe." 
+ str(random.getrandbits(64))) try: os.mkfifo(named_pipe_path, 0600) except OSError, exception: print "Failed to create FIFO: %s" % exception devnull = open(os.devnull, 'w') ssh_add = subprocess.Popen(["ssh-add", named_pipe_path], stdout=devnull, stderr=devnull) fifo = open(named_pipe_path, 'w') print >> fifo, key fifo.close() #Popen.wait() doesn't have a timeout, so we'll implement one using poll() :( start_time = datetime.datetime.now() while ssh_add.poll() is None: if (datetime.datetime.now() - start_time).total_seconds() > 5: try: ssh_add.kill() except OSError: pass raise SshAgentException("Unable to add ssh key. Timed out. Does key have a passphrase?") time.sleep(0.1) os.remove(named_pipe_path) # pylint: disable=too-many-lines # these are already imported inside of the ssh library #import os #import subprocess class GitCLIError(Exception): '''Exception class for openshiftcli''' pass # pylint: disable=too-few-public-methods class GitCLI(object): ''' Class to wrap the command line tools ''' def __init__(self, path, verbose=False, ssh_key=None, author=None): ''' Constructor for GitCLI ''' self.path = path self.verbose = verbose self.ssh_key = ssh_key self.author = author self.environment_vars = os.environ.copy() if self.author: author_dict = {} author_list = author.split('<') author_dict['GIT_COMMITTER_NAME'] = author_list[0].strip() author_dict['GIT_COMMITTER_EMAIL'] = author_list[0].strip() self.environment_vars.update(author_dict) def _add(self, files_to_add=None): ''' git add ''' cmd = ["add", "--no-ignore-removal"] if files_to_add: cmd.extend(files_to_add) else: cmd.append('.') results = self.git_cmd(cmd) return results def _commit(self, msg, author=None): ''' git commit with message ''' cmd = ["commit", "-m", msg] if author: cmd += ["--author", author] results = self.git_cmd(cmd) return results def _clone(self, repo, dest, bare=False): ''' git clone ''
' cmd = ["clone"] if bare: cmd += ["--bare"] cmd += [repo, dest] results = self.git_cmd(cmd) return results def _status(self, porcelain=False, show_untracked=True): ''' Do a git status ''' cmd = ["status"] if porcelain: cmd.append('--porcelain') if show_untracked: cmd.append('--untracked-files=normal') else: cmd.
append('--untracked-files=no') results = self.git_cmd(cmd, output=True, output_type='raw') return results def _checkout(self, branch): ''' Do a git checkout to <branch> ''' cmd = ["checkout", branch] results = self.git_cmd(cmd, output=True, output_type='raw') return results def _get_current_branch(self): ''' Do a git checkout to <branch> ''' cmd = ["describe", "--contains", "--all", "HEAD"] results = self.git_cmd(cmd, output=True, output_type='raw') results['results'] = results['results'].rstrip() return results def _merge(self, merge_id): ''' Do a git checkout to <branch> ''' cmd = ["merge", merge_id] results = self.git_cmd(cmd, output=True, output_type='raw') return results def _push(self, remote, src_branch, dest_branch): ''' Do a git checkou
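A minimal usage sketch of the SshAgent defined above, following its own docstring; the key path and repository URL are illustrative, and the key is assumed to carry no passphrase (otherwise add_key() raises SshAgentException after its five-second timeout):

import subprocess

with SshAgent() as agent:                       # starts ssh-agent, sets SSH_AUTH_SOCK
    agent.add_key(open('/path/to/id_rsa').read())
    # child processes inherit SSH_AUTH_SOCK and use the loaded key
    subprocess.check_call(['git', 'clone', 'git@example.com:repo.git'])
# when the agent loses scope, it is killed and SSH_AUTH_SOCK is restored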
mozilla/pymake
pymake/parserdata.py
Python
mit
33,522
0.002357
from __future__ import print_function import logging, re, os import data, parser, util from pymake.globrelative import hasglob, glob from pymake import errors try: from cStringIO import StringIO except ImportError: from io import StringIO _log = logging.getLogger('pymake.data') _tabwidth = 4 class Location(object): """ A location within a makefile. For the moment, locations are just path/line/column, but in the future they may reference parent locations for more accurate "included from" or "evaled at" error reporting. """ __slots__ = ('path', 'line', 'column') def __init__(self, path, line, column): self.path = path self.line = line self.column = column def offset(self, s, start, end): """ Returns a new location offset by the specified string. """ if start == end: return self skiplines = s.count('\n', start, end) line = self.line + skiplines if skiplines: lastnl = s.rfind('\n', start, end) assert lastnl != -1 start = lastnl + 1 column = 0 else: column = self.column while True: j = s.find('\t', start, end) if j == -1: column += end - start break column += j - start column += _tabwidth column -= column % _tabwidth start = j + 1 return Location(self.path, line, column) def __str__(self): return "%s:%s:%s" % (self.path, self.line, self.column) def _expandwildcards(makefile, tlist): for t in tlist: if not hasglob(t): yield t else: l = glob(makefile.workdir, t) for r in l: yield r _flagescape = re.compile(r'([\s\\])') def parsecommandlineargs(args): """ Given a set of arguments from a command-line invocation of make, parse out the variable definitions and return (stmts, arglist, overridestr) """ overrides = [] stmts = StatementList() r = [] for i in range(0, len(args)): a = args[i] vname, t, val = util.strpartition(a, ':=') if t == '': vname, t, val = util.strpartition(a, '=') if t != '': overrides.append(_flagescape.sub(r'\\\1', a)) vname = vname.strip() vnameexp = data.Expansion.fromstring(vname, "Command-line argument") stmts.append(ExportDirective(vnameexp, concurrent_set=True)) stmts.append(SetVariable(vnameexp, token=t, value=val, valueloc=Location('<command-line>', i, len(vname) + len(t)), targetexp=None, source=data.Variables.SOURCE_COMMANDLINE)) else: r.append(data.stripdotslash(a)) return stmts, r, ' '.join(overrides) class Statement(object): """ Represents parsed make file syntax. This is an abstract base class. Child classes are expected to implement basic methods defined below. """ def execute(self, makefile, context): """Executes this Statement within a make file execution context.""" raise Exception("%s must implement execute()." % self.__class__) def to_source(self): """Obtain the make file "source" representation of the Statement. This converts an individual Statement back to a string that can again be parsed into this Statement. """ raise Exception("%s must implement to_source()." % self.__class__) def __eq__(self, other): raise Exception("%s must implement __eq__." % self.__class__) def __ne__(self, other): return self.__eq__(other) class DummyRule(object): __slots__ = () def addcommand(self, r): pass class Rule(Statement): """ Rules represent how to make specific targets. See https://www.gnu.org/software/make/manual/make.html#Rules. An individual rule is composed of a target, dependencies, and a recipe. This class only contains references to the first 2. The recipe will be contained in Comman
d classes which follow this one in a stream of Statement instances. Instances also contain a boolean property `doublecolon` which says whether this is a doublecolon rule. Doublecolon rules are rules that are always executed, if they are evaluated. Normally, rules are only executed if their target is out of date. """ __slots__ = ('targ
etexp', 'depexp', 'doublecolon') def __init__(self, targetexp, depexp, doublecolon): assert isinstance(targetexp, (data.Expansion, data.StringExpansion)) assert isinstance(depexp, (data.Expansion, data.StringExpansion)) self.targetexp = targetexp self.depexp = depexp self.doublecolon = doublecolon def execute(self, makefile, context): if context.weak: self._executeweak(makefile, context) else: self._execute(makefile, context) def _executeweak(self, makefile, context): """ If the context is weak (we're just handling dependencies) we can make a number of assumptions here. This lets us go really fast and is generally good. """ assert context.weak deps = self.depexp.resolvesplit(makefile, makefile.variables) # Skip targets with no rules and no dependencies if not deps: return targets = data.stripdotslashes(self.targetexp.resolvesplit(makefile, makefile.variables)) rule = data.Rule(list(data.stripdotslashes(deps)), self.doublecolon, loc=self.targetexp.loc, weakdeps=True) for target in targets: makefile.gettarget(target).addrule(rule) makefile.foundtarget(target) context.currule = rule def _execute(self, makefile, context): assert not context.weak atargets = data.stripdotslashes(self.targetexp.resolvesplit(makefile, makefile.variables)) targets = [data.Pattern(p) for p in _expandwildcards(makefile, atargets)] if not len(targets): context.currule = DummyRule() return ispatterns = set((t.ispattern() for t in targets)) if len(ispatterns) == 2: raise errors.DataError("Mixed implicit and normal rule", self.targetexp.loc) ispattern, = ispatterns deps = list(_expandwildcards(makefile, data.stripdotslashes(self.depexp.resolvesplit(makefile, makefile.variables)))) if ispattern: prerequisites = [data.Pattern(d) for d in deps] rule = data.PatternRule(targets, prerequisites, self.doublecolon, loc=self.targetexp.loc) makefile.appendimplicitrule(rule) else: rule = data.Rule(deps, self.doublecolon, loc=self.targetexp.loc, weakdeps=False) for t in targets: makefile.gettarget(t.gettarget()).addrule(rule) makefile.foundtarget(targets[0].gettarget()) context.currule = rule def dump(self, fd, indent): print("%sRule %s: %s" % (indent, self.targetexp, self.depexp), file=fd) def to_source(self): sep = ':' if self.doublecolon: sep = '::' deps = self.depexp.to_source() if len(deps) > 0 and not deps[0].isspace(): sep += ' ' return '\n%s%s%s' % ( self.targetexp.to_source(escape_variables=True), sep, deps) def __eq__(self, other): if not isinstance(other, Rule): return False return self.targetexp == other.targetexp \ and self.depexp == other.depexp \ and self.doublecolon == other.doublecolon class StaticPatternRule(Statement): """ Static pattern rules are rules which specify multiple targets based on a string pattern. See https://www.gnu.org/software/make/manual/make.html#Static-Pattern They are like `Rule` instances except an added property, `patternexp` is present. It contains the Expansion which represents the rule pattern.
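A small worked example of the tab-stop arithmetic in Location.offset() above (_tabwidth = 4, so a tab advances the column to the next multiple of four); the standalone import is an assumption, since it depends on the package's internal imports resolving:

from pymake.parserdata import Location

loc = Location("Makefile", 1, 0)
s = "foo:\n\tbar"
new = loc.offset(s, 0, len(s))
# one newline skipped -> line 2; the tab jumps to column 4, then "bar" adds 3
assert (new.line, new.column) == (2, 7)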
librallu/cohorte-herald
python/herald/transports/http/beans.py
Python
apache-2.0
3,526
0
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Herald HTTP beans definition

:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.0.3
:status: Alpha

..

    Copyright 2014 isandlaTech

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""

# Module version
__version_info__ = (0, 0, 3)
__version__ = ".".join(str(x) for x in __version_info__)

# Documentation strings format
__docformat__ = "restructuredtext en"

# ------------------------------------------------------------------------------

# Herald HTTP
from . import ACCESS_ID

# Standard library
import functools

# ------------------------------------------------------------------------------


@functools.total_ordering
class HTTPAccess(object):
    """
    Description of an HTTP access
    """
    def __init__(self, host, port, path):
        """
        Sets up the access

        :param host: HTTP server host
        :param port: HTTP server port
        :param path: Path to the Herald service
        """
        # Normalize path
        if path[0] == '/':
            path = path[1:]

        self.__host = host
        self.__port = int(port)
        self.__path = path

    def __hash__(self):
        """
        Hash is based on the access tuple
        """
        return hash(self.access)

    def __eq__(self, other):
        """
        Equality based on JID
        """
        if isinstance(other, HTTPAccess):
            return self.access == other.access
        return False

    def __lt__(self, other):
        """
        JID string ordering
        """
        if isinstance(other, HTTPAccess):
            return self.access < other.access
        return False

    def __str__(self):
        """
        String representation
        """
        return "http://{0}:{1}/{2}".format(self.__host, self.__port,
                                           self.__path)

    @property
    def access_id(self):
        """
        Retrieves the access ID associated to this kind of access
        """
        return ACCESS_ID

    @property
    def access(self):
        """
        Returns the access to the peer as a 3-tuple (host, port, path)
        """
        return self.__host, self.__port, self.__path

    @property
    def address(self):
        """
        Returns the address of the HTTP server to access the peer (host, port)
        """
        return self.__host, self.__port

    @property
    def host(self):
        """
        Retrieves the host address of the associated peer
        """
        return self.__host

    @property
    def port(self):
        """
        Retrieves the host port of the associated peer
        """
        return self.__port

    @property
    def path(self):
        """
        Retrieves the path to the Herald service
        """
        return self.__path

    def dump(self):
        """
        Returns the content to store in a directory dump to describe
        this access
        """
        return self.access
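A short usage sketch of the HTTPAccess bean above (host, port and path are illustrative); the constructor coerces the port to int and strips one leading slash from the path, so these two spellings compare equal:

access = HTTPAccess("localhost", 8080, "/herald")
print(access)                     # -> http://localhost:8080/herald
host, port, path = access.dump()  # same 3-tuple as the .access property
assert access == HTTPAccess("localhost", "8080", "herald")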
renatopp/liac-soccer
clients/python/setup.py
Python
mit
495
0.024242
import sys
from cx_Freeze import setup, Executable

# Dependencies are automatically detected, but it might need fine tuning.
build_exe_options = {"packages": ["math", "json"], "excludes": ["tkinter"]}

# GUI applications require a different base on Windows (the default is for a
# console application).
setup(
    name="liac-soccer",
    version="1.0.0",
    description="",
    options={"build_exe": build_exe_options},
    executables=[Executable("ball_follower.py")])
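For context, a sketch of how this setup script is typically driven; cx_Freeze registers a build_exe command with setup.py, and the output directory name (build/exe.<platform>-<pyver>) varies by platform:

import subprocess
import sys

# hypothetical invocation; assumes cx_Freeze is installed and we are in the
# directory containing setup.py and ball_follower.py
subprocess.check_call([sys.executable, "setup.py", "build_exe"])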
OpenCMISS/neon
src/opencmiss/neon/ui/simulations/ui_biomeng321lab1.py
Python
apache-2.0
18,237
0.003784
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'res/designer/simulations/biomeng321lab1.ui' # # Created: Fri Mar 4 13:11:44 2016 # by: pyside-uic 0.2.15 running on PySide 1.2.1 # # WARNING! All changes made in this file will be lost! from PySide import QtCore, QtGui class Ui_Biomeng321Lab1(object): def setupUi(self, shared_opengl_widget, Biomeng321Lab1): Biomeng321Lab1.setObjectName("Biomeng321Lab1") Biomeng321Lab1.resize(1066, 907) self.gridLayout_3 = QtGui.QGridLayout(Biomeng321Lab1) self.gridLayout_3.setObjectName("gridLayout_3") self.groupBox = QtGui.QGroupBox(Biomeng321Lab1) self.groupBox.setObjectName("groupBox") self.gridLayout_4 = QtGui.QGridLayout(self.groupBox) self.gridLayout_4.setObjectName("gridLayout_4") self.groupBox_9 = QtGui.QGroupBox(self.groupBox) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(2) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.groupBox_9.sizePolicy().hasHeightForWidth()) self.groupBox_9.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setPointSize(10) font.setWeight(75) font.setBold(True) self.groupBox_9.setFont(font) self.groupBox_9.setObjectName("groupBox_9") self.gridLayout_2 = QtGui.QGridLayout(self.groupBox_9) self.gridLayout_2.setObjectName("gridLayout_2") self.groupBox_6 = QtGui.QGroupBox(self.groupBox_9) font = QtGui.QFont() font.setPointSize(8) font.setWeight(50) font.setBold(False) self.groupBox_6.setFont(font) self.groupBox_6.setObjectName("groupBox_6") self.horizontalLayout = QtGui.QHBoxLayout(self.groupBox_6) self.horizontalLayout.setObjectName("horizontalLayout") self.label = QtGui.QLabel(self.groupBox_6) font = QtGui.QFont() font.setPointSize(12) font.setWeight(75) font.setBold(True) self.label.setFont(font) self.label.setObjectName("label") self.horizontalLayout.addWidget(self.label) self.label_2 = QtGui.QLabel(self.groupBox_6) self.label_2.setObjectName("label_2") self.horizontalLayout.addWidget(self.label_2) self.tableWidgetDeformationGradient = QtGui.QTableWidget(self.groupBox_6) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.tableWidgetDeformationGradient.sizePolicy().hasHeightForWidth()) self.tableWidgetDeformationGradient.setSizePolicy(sizePolicy) self.tableWidgetDeformationGradient.setObjectName("tableWidgetDeformationGradient") self.tableWidgetDeformationGradient.setColumnCount(0) self.tableWidgetDeformationGradient.setRowCount(0) self.tableWidgetDeformationGradient.horizontalHeader().setVisible(False) self.tableWidgetDeformationGradient.verticalHeader().setVisible(False) self.horizontalLayout.addWidget(self.tableWidgetDeformationGradient) self.gridLayout_2.addWidget(self.groupBox_6, 0, 0, 1, 1) self.groupBox_2 = QtGui.QGroupBox(self.groupBox_9) font = QtGui.QFont() font.setPointSize(8) font.setWeight(50) font.setBold(False) self.groupBox_2.setFont(font) self.groupBox_2.setObjectName("groupBox_2") self.formLayout = QtGui.QFormLayout(self.groupBox_2) self.formLayout.setObjectName("formLayout") self.label_3 = QtGui.QLabel(self.groupBox_2) self.label_3.setObjectName("label_3") self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_3) self.lineEditInvariant1 = QtGui.QLineEdit(self.groupBox_2) self.lineEditInvariant1.setObjectName("lineEditInvariant1") self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.lineEditInvariant1) self.label_4 = 
QtGui.QLabel(self.groupBox_2) self.label_4.setObjectName("label_4") self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_4) self.lineEditInvariant2 = QtGui.QLineEdit(self.groupBox_2) self.lineEditInvariant2.setObjectName("lineEditInvariant2") self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.lineEditInvariant2) self.label_5 = QtGui.QLabel(self.groupBox_2) self.label_5.setObjectName("label_5") self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_5) self.lineEditInvariant3 = QtGui.QLineEdit(self.groupBox_2) self.lineEditInvariant3.setObjectName("lineEditInvariant3") self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.lineEditInvariant3) self.gridLayout_2.addWidget(self.groupBox_2, 0, 1, 1, 1) self.groupBox_7 = QtGui.QGroupBox(self.groupBox_9) font = QtGui.QFont() font.setPointSize(8) font.setWeight(50) font.setBold(False) self.groupBox_7.setFont(font) self.groupBox_7.setObjectName("groupBox_7") self.horizontalLayout_2 = QtGui.QHBoxLayout(self.groupBox_7) self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.label_7 = QtGui.QLabel(self.groupBox_7) font = QtGui.QFont() font.setPointSize(12) font.setWeight(75) font.setBold(True) self.label_7.setFont(font) self.label_7.setObjectName("label_7") self.horizontalLayout_2.addWidget(self.label_7) self.label_8 = QtGui.QLabel(self.groupBox_7) self.label_8.setObjectName("label_8") self.horizontalLayout_2.addWidget(self.label_8) self.tableWidgetRightCauchyGreenDeformation = QtGui.QTableWidget(self.groupBox_7) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.tableWidgetRightCauchyGreenDeformation.sizePolicy().hasHeightForWidth()) self.tableWidgetRightCauchyGreenDeformation.setSizePolicy(sizePolicy) self.tableWidgetRightCauchyGreenDeformation.setObjectName("tableWidgetRightCauchyGreenDeformation") self.tableWidgetRightCauchyGreenDeformation.setColumnCount(0) self.tableWidgetRightCauchyGreenDeformation.setRowCount(0) self.tableWidgetRightCauchyGreenDeformation.horizontalHeader().setVisible(False) self.tableWidgetRightCauchyGreenDeformation.verticalHeader().setVisible(False) self.horizontalLayout_2.addWidget(self.tableWidgetRightCauchyGreenDeformation) self.gridLayout_2.addWidget(self.groupBox_7, 1, 0, 1, 1) self.groupBox_5 = QtGui.QGroupBox(self.groupBox_9) font = QtGui.QFont() font.setPointSize(8) font.setWeight(50) font.setBold(False) self.groupBox_5.setFont(font) self.groupBox_5.setObjectName("groupBox_5") self.gridLayout_6 = QtGui.QGridLayout(self.groupBox_5) self.grid
Layou
t_6.setObjectName("gridLayout_6") self.label_9 = QtGui.QLabel(self.groupBox_5) font = QtGui.QFont() font.setPointSize(12) font.setWeight(75) font.setBold(True) self.label_9.setFont(font) self.label_9.setObjectName("label_9") self.gridLayout_6.addWidget(self.label_9, 0, 0, 1, 1) self.tableWidgetGreenLagrangeStrain = QtGui.QTableWidget(self.groupBox_5) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.tableWidgetGreenLagrangeStrain.sizePolicy().hasHeightForWidth()) self.tableWidgetGreenLagrangeStrain.setSizePolicy(sizePolicy) self.tableWidgetGreenLagrangeStrain.setObjectName("tableWidgetGreenLagrangeStrain") self.tableWidgetGreenLagrangeStrain.setColumnCount(0) self.tableWidge
mezz64/home-assistant
tests/components/nest/test_climate_sdm.py
Python
apache-2.0
43,509
0.000207
""" Test for Nest climate platform for the Smart Device Management API. These tests fake out the subscriber/devicemanager, and are not using a real pubsub subscriber. """ from google_nest_sdm.device import Device from google_nest_sdm.event import EventMessage import pytest from homeassistant.components.climate.const import ( ATTR_CURRENT_TEMPERATURE, ATTR_FAN_MODE, ATTR_FAN_MODES, ATTR_HVAC_ACTION, ATTR_HVAC_MODES, ATTR_PRESET_MODE, ATTR_PRESET_MODES, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL, CURRENT_HVAC_HEAT, CURRENT_HVAC_IDLE, CURRENT_HVAC_OFF, FAN_LOW, FAN_OFF, FAN_ON, HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT, HVAC_MODE_HEAT_COO
L, HVAC_MODE_OFF, PRESET_ECO, PRESET_NONE, PRESET_SLEEP, ) from homeassistant.const import ATTR_TEMPERATURE from .common import async_setup_sdm_platform from tests.components.climate import common PLATFORM = "climate" async def setup_climate(hass, raw_traits=None, auth=None): """Load Nest climate devices.""" devices = None if raw_traits: traits = raw_traits traits["sdm.devices.traits.Info"] = {"customName": "My Thermostat"} devices = { "some-device-id": Device.MakeDevice( { "name": "some-device-id", "type": "sdm.devices.types.Thermostat", "traits": traits, }, auth=auth, ), } return await async_setup_sdm_platform(hass, PLATFORM, devices) async def test_no_devices(hass): """Test no devices returned by the api.""" await setup_climate(hass) assert len(hass.states.async_all()) == 0 async def test_climate_devices(hass): """Test no eligible climate devices returned by the api.""" await setup_climate(hass, {"sdm.devices.traits.CameraImage": {}}) assert len(hass.states.async_all()) == 0 async def test_thermostat_off(hass): """Test a thermostat that is not running.""" await setup_climate( hass, { "sdm.devices.traits.ThermostatHvac": {"status": "OFF"}, "sdm.devices.traits.ThermostatMode": { "availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"], "mode": "OFF", }, "sdm.devices.traits.Temperature": { "ambientTemperatureCelsius": 16.2, }, }, ) assert len(hass.states.async_all()) == 1 thermostat = hass.states.get("climate.my_thermostat") assert thermostat is not None assert thermostat.state == HVAC_MODE_OFF assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] == 16.2 assert set(thermostat.attributes[ATTR_HVAC_MODES]) == { HVAC_MODE_HEAT, HVAC_MODE_COOL, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF, } assert thermostat.attributes[ATTR_TEMPERATURE] is None assert thermostat.attributes[ATTR_TARGET_TEMP_LOW] is None assert thermostat.attributes[ATTR_TARGET_TEMP_HIGH] is None assert ATTR_PRESET_MODE not in thermostat.attributes assert ATTR_PRESET_MODES not in thermostat.attributes assert ATTR_FAN_MODE not in thermostat.attributes assert ATTR_FAN_MODES not in thermostat.attributes async def test_thermostat_heat(hass): """Test a thermostat that is heating.""" await setup_climate( hass, { "sdm.devices.traits.ThermostatHvac": { "status": "HEATING", }, "sdm.devices.traits.ThermostatMode": { "availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"], "mode": "HEAT", }, "sdm.devices.traits.Temperature": { "ambientTemperatureCelsius": 16.2, }, "sdm.devices.traits.ThermostatTemperatureSetpoint": { "heatCelsius": 22.0, }, }, ) assert len(hass.states.async_all()) == 1 thermostat = hass.states.get("climate.my_thermostat") assert thermostat is not None assert thermostat.state == HVAC_MODE_HEAT assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_HEAT assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] == 16.2 assert set(thermostat.attributes[ATTR_HVAC_MODES]) == { HVAC_MODE_HEAT, HVAC_MODE_COOL, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF, } assert thermostat.attributes[ATTR_TEMPERATURE] == 22.0 assert thermostat.attributes[ATTR_TARGET_TEMP_LOW] is None assert thermostat.attributes[ATTR_TARGET_TEMP_HIGH] is None assert ATTR_PRESET_MODE not in thermostat.attributes assert ATTR_PRESET_MODES not in thermostat.attributes async def test_thermostat_cool(hass): """Test a thermostat that is cooling.""" await setup_climate( hass, { "sdm.devices.traits.ThermostatHvac": { "status": "COOLING", }, "sdm.devices.traits.ThermostatMode": { "availableModes": ["HEAT", "COOL", 
"HEATCOOL", "OFF"], "mode": "COOL", }, "sdm.devices.traits.Temperature": { "ambientTemperatureCelsius": 29.9, }, "sdm.devices.traits.ThermostatTemperatureSetpoint": { "coolCelsius": 28.0, }, }, ) assert len(hass.states.async_all()) == 1 thermostat = hass.states.get("climate.my_thermostat") assert thermostat is not None assert thermostat.state == HVAC_MODE_COOL assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_COOL assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] == 29.9 assert set(thermostat.attributes[ATTR_HVAC_MODES]) == { HVAC_MODE_HEAT, HVAC_MODE_COOL, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF, } assert thermostat.attributes[ATTR_TEMPERATURE] == 28.0 assert thermostat.attributes[ATTR_TARGET_TEMP_LOW] is None assert thermostat.attributes[ATTR_TARGET_TEMP_HIGH] is None assert ATTR_PRESET_MODE not in thermostat.attributes assert ATTR_PRESET_MODES not in thermostat.attributes async def test_thermostat_heatcool(hass): """Test a thermostat that is cooling in heatcool mode.""" await setup_climate( hass, { "sdm.devices.traits.ThermostatHvac": { "status": "COOLING", }, "sdm.devices.traits.ThermostatMode": { "availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"], "mode": "HEATCOOL", }, "sdm.devices.traits.Temperature": { "ambientTemperatureCelsius": 29.9, }, "sdm.devices.traits.ThermostatTemperatureSetpoint": { "heatCelsius": 22.0, "coolCelsius": 28.0, }, }, ) assert len(hass.states.async_all()) == 1 thermostat = hass.states.get("climate.my_thermostat") assert thermostat is not None assert thermostat.state == HVAC_MODE_HEAT_COOL assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_COOL assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] == 29.9 assert set(thermostat.attributes[ATTR_HVAC_MODES]) == { HVAC_MODE_HEAT, HVAC_MODE_COOL, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF, } assert thermostat.attributes[ATTR_TARGET_TEMP_LOW] == 22.0 assert thermostat.attributes[ATTR_TARGET_TEMP_HIGH] == 28.0 assert thermostat.attributes[ATTR_TEMPERATURE] is None assert ATTR_PRESET_MODE not in thermostat.attributes assert ATTR_PRESET_MODES not in thermostat.attributes async def test_thermostat_eco_off(hass): """Test a thermostat cooling with eco off.""" await setup_climate( hass, { "sdm.devices.traits.ThermostatHvac": { "status": "COOLING", }, "sdm.devices.traits.ThermostatMode": { "availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
Azure/azure-sdk-for-python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_06_01/aio/operations/_route_filter_rules_operations.py
Python
mit
28,535
0.005011
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class RouteFilterRulesOperations: """RouteFilterRulesOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2018_06_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _delete_initial( self, resource_group_name: str, r
oute_filter_name: str, rule_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-06-01" # Construct URL ur
l = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'), 'ruleName': self._serialize.url("rule_name", rule_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore async def begin_delete( self, resource_group_name: str, route_filter_name: str, rule_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Deletes the specified rule from a route filter. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param route_filter_name: The name of the route filter. :type route_filter_name: str :param rule_name: The name of the rule. :type rule_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, route_filter_name=route_filter_name, rule_name=rule_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'), 'ruleName': self._serialize.url("rule_name", rule_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore async def get( self, resource_group_name: str, route_filter_name: str, rule_name: str, **kwargs: Any ) -> "_models.RouteFilterRule": """Gets the specified rule from a route filter. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param route_filter_name: The name of the route filter. :type route_filter_name: str :param rule_name: The n
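A hedged call sketch for begin_delete() above; it assumes an authenticated client whose route_filter_rules attribute is this operations class (as the class docstring describes), all names are illustrative, and the code runs inside a coroutine:

poller = await network_client.route_filter_rules.begin_delete(
    resource_group_name="my-rg",
    route_filter_name="my-filter",
    rule_name="my-rule",
)
await poller.result()  # returns once the long-running delete completes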
MLAB-project/pymlab
src/pymlab/sensors/i2cpwm.py
Python
gpl-3.0
2,819
0.010287
#!/usr/bin/python

import time

from pymlab.sensors import Device

# TODO: set only one pin, not the whole bus


class I2CPWM(Device):
    'Python library for I2CPWM01A MLAB module with NXP Semiconductors PCA9531 I2C-bus LED dimmer'

    MODES = {
        'X': 0b00,
        'LOW': 0b01,
        'PWM0': 0b10,
        'PWM1': 0b11,
    }

    def __init__(self, parent=None, address=0b1100011, **kwargs):
        Device.__init__(self, parent, address, **kwargs)

        # The INPUT register reflects the state of the device pins. Writes to
        # this register will be acknowledged but will have no effect.
        self.PWM_INPUT = 0x00
        # PSC0 is used to program the period of the PWM output.
        self.PWM_PSC0 = 0x01
        # The PWM0 register determines the duty cycle of BLINK0. The outputs
        # are LOW (LED on) when the count is less than the value in PWM0 and
        # HIGH (LED off) when it is greater. If PWM0 is programmed with 00h,
        # then the PWM0 output is always HIGH (LED off).
        self.PWM_PWM0 = 0x02
        # PSC1 is used to program the period of the PWM output.
        self.PWM_PSC1 = 0x03
        # The PWM1 register determines the duty cycle of BLINK1. The outputs
        # are LOW (LED on) when the count is less than the value in PWM1 and
        # HIGH (LED off) when it is greater. If PWM1 is programmed with 00h,
        # then the PWM1 output is always HIGH (LED off).
        self.PWM_PWM1 = 0x04
        # The LSn LED select registers determine the source of the LED data.
        self.PWM_LS0 = 0x05
        self.PWM_LS1 = 0x06

    def set_pwm0(self, frequency, duty):  # frequency in Hz, duty cycle in % (0-100)
        period = int((1.0 / float(frequency)) * 152.0) - 1
        duty = int((float(duty) / 100.0) * 255.0)
        self.bus.write_byte_data(self.address, self.PWM_PSC0, period)
        self.bus.write_byte_data(self.address, self.PWM_PWM0, duty)

    def set_pwm1(self, frequency, duty):  # frequency in Hz, duty cycle in % (0-100)
        period = int((1.0 / float(frequency)) * 152.0) - 1
        duty = int((float(duty) / 100.0) * 255.0)
        self.bus.write_byte_data(self.address, self.PWM_PSC1, period)
        self.bus.write_byte_data(self.address, self.PWM_PWM1, duty)

    def set_ls0(self, mode):
        self.bus.write_byte_data(self.address, self.PWM_LS0, mode)

    def set_ls1(self, mode):
        self.bus.write_byte_data(self.address, self.PWM_LS1, mode)

    def set_output_type(self, mode=['X', 'X', 'X', 'X', 'X', 'X', 'X', 'X']):
        self.set_ls0((self.MODES[mode[0]] << 6) | (self.MODES[mode[1]] << 4)
                     | (self.MODES[mode[2]] << 2) | self.MODES[mode[3]])
        self.set_ls1((self.MODES[mode[4]] << 6) | (self.MODES[mode[5]] << 4)
                     | (self.MODES[mode[6]] << 2) | self.MODES[mode[7]])

    def get_input(self):
        return self.bus.read_byte_data(self.address, self.PWM_INPUT)


def main():
    print(__doc__)


if __name__ == "__main__":
    main()
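A worked example of the PCA9531 register arithmetic used by set_pwm0() above: the prescaler byte encodes the blink period as (PSC + 1) / 152 s, and the duty byte is a fraction of 255:

frequency, duty = 1.0, 50.0                # 1 Hz blink, 50 % duty cycle
psc0 = int((1.0 / frequency) * 152.0) - 1  # -> 151, i.e. a 1 s period
pwm0 = int((duty / 100.0) * 255.0)         # -> 127, roughly half of 255
assert (psc0, pwm0) == (151, 127)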
SimplyPaper/SimplyPaper.github.io
AzureBus.py
Python
apache-2.0
842
0.008314
from azure.servicebus import ServiceBusService, Message, Queue

bus_service = ServiceBusService(
    service_namespace='SimplyPaper',
    shared_access_key_name='RootManageSharedAccessKey',
    shared_access_key_value='1Y4YNh7uQ/buNi1v3xunn6F6vfSsJ5+nrmiwKY2WM04')

# Endpoint=sb://simplypaper.servicebus.windows.net/;
# SharedAccessKeyName=RootManageSharedAccessKey;
# SharedAccessKey=1Y4YNh7uQ/buNi1v3xunn6F6vfSsJ5+nrmiwKY2WM04=

bus_service.create_queue('taskqueue')

queue_options = Queue()
queue_options.max_size_in_megabytes = '5120'
queue_options.default_message_time_to_live = 'PT1M'
bus_service.create_queue('taskqueue', queue_options)

msg = Message(b'Test Message Simply Paper')
bus_service.send_queue_message('taskqueue', msg)

msg = bus_service.receive_queue_message('taskqueue', peek_lock=False)
print(msg.body)
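The script above uses the legacy azure-servicebus (v0.x) service API and a destructive read (peek_lock=False); a sketch of the peek-lock variant in that same legacy API, under the assumption that locked messages expose delete():

msg = bus_service.receive_queue_message('taskqueue', peek_lock=True)
if msg.body is not None:
    print(msg.body)
    msg.delete()  # remove from the queue only after successful processing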
hbrunn/bank-statement-import
account_bank_statement_import/models/account_bank_statement_import.py
Python
agpl-3.0
17,538
0
# -*- coding: utf-8 -*- """Framework for importing bank statement files.""" import logging import base64 from StringIO import StringIO from zipfile import ZipFile, BadZipfile # BadZipFile in Python >= 3.2 from openerp import api, models, fields from openerp.tools.translate import _ from openerp.exceptions import Warning as UserError _logger = logging.getLogger(__name__) # pylint: disable=invalid-name class AccountBankStatementLine(models.Model): """Extend model account.bank.statement.line.""" # pylint: disable=too-many-public-methods _inherit = "account.bank.statement.line" # Ensure transactions can be imported only once (if the import format # provides unique transaction ids) unique_import_id = fields.Char('Import ID', readonly=True, copy=False) _sql_constraints = [ ('unique_import_id', 'unique (unique_import_id)', 'A bank account transactions can be imported only once !') ] class AccountBankStatementImport(models.TransientModel): """Extend model account.bank.statement.""" _name = 'account.bank.statement.import' _description = 'Import Bank Statement' @api.model def _get_hide_journal_field(self): """ Return False if the journal_id can't be provided by the parsed file and must be provided by the wizard. See account_bank_statement_import_qif """ # pylint: disable=no-self-use return True journal_id = fields.Many2one( 'account.journal', string='Journal', help='Accounting journal related to the bank statement you\'re ' 'importing. It has be be manually chosen for statement formats which ' 'doesn\'t allow automatic journal detection (QIF for example).') hide_journal_field = fields.Boolean( string='Hide the journal field in the view', compute='_get_hide_journal_field') data_file = fields.Binary( 'Bank Statement File', required=True, help='Get you bank statements in electronic format from your bank ' 'and select them here.') @api.multi def import_file(self): """Process the file chosen in the wizard, create bank statement(s) and go to reconciliation.""" self.ensure_one() data_file = base64.b64decode(self.data_file) # pylint: disable=protected-access statement_ids, notifications = self.with_context( active_id=self.id # pylint: disable=no-member )._import_file(data_file) # dispatch to reconciliation interface action = self.env.ref( 'account.action_bank_reconcile_bank_statements') return { 'name': action.name, 'tag': action.tag, 'context': { 'statement_ids': statement_ids, 'notifications': notifications }, 'type': 'ir.actions.client', } @api.model def _parse_all_files(self, data_file): """Parse one file or multiple files from zip-file. Return array of statements for further processing. """ statements = [] files = [data_file] try: with ZipFile(StringIO(data_file), 'r') as archive: files = [ archive.read(filename) for filename in archive.namelist() if not filename.endswith('/') ] except BadZipfile: pass # Parse the file(s) for import_file in files: # The appropriate implementation module(s) returns the statements. # Actually we don't care wether all the files have the same # format. Although unlikely you might mix mt940 and camt files # in one zipfile. 
parse_result = self._parse_file(import_file) # Check for old version result, with separate currency and account if isinstance(parse_result, tuple) and len(parse_result) == 3: (currency_code, account_number, new_statements) = parse_result for stmt_vals in new_statements: stmt_vals['currency_code'] = currency_code stmt_vals['account_number'] = account_number else: new_statements = parse_result statements += new_statements return statements @api.model def _import_file(self, data_file): """ Create bank statement(s) from file.""" # The appropriate implementation module returns the required data statement_ids = [] notifications = [] statements = self._parse_all_files(data_file) # Check raw data: self._check_parsed_data(statements) # Import all statements: for stmt_vals in statements: (statement_id, new_notifications) = ( self._import_statement(stmt_vals)) if statement_id: statement_ids.append(statement_id) notifications.extend(new_notifications) if len(statement_ids) == 0: raise UserError(_('You have already imported that file.')) return statement_ids, notifications @api.model def _import_statement(self, stmt_vals): """Import a single bank-statement. Return ids of created statements and notifications. """ currency_code = stmt_vals.pop('currency_code') account_number = stmt_vals.pop('account_number') # Try to find the bank account and currency in odoo currency_id = self._find_currency_id(currency_code) bank_account_id = self._find_bank_account_id(account_number) if not bank_account_id and account_number: raise UserError( _('Can not find the account number %s.') % account_number ) # Find the bank journal journal_id = self._get_journal(currency_id, bank_account_id) # By now journal and account_number must be known if not journal_id: raise UserError(_('Can not determine journal for import.')) # Prepare statement data to be used for bank statements creation stmt_vals = self._complete_statement( stmt_vals, journal_id, account_number) # Create the bank stmt_vals return self._create_bank_statement(stmt_vals) @api.model def _parse_file(self, data_file): # pylint: disable=no-self-use # pylint: disable=unused-argument """ Each module adding a file support must extends this method. It processes the file if it can, returns super otherwise, resulting in a chain of responsability. This method parses the given file and returns the data required by the bank statement import process, as specified below. - bank statements data: list of dict containing (optional items marked by o) : -o currency code: string (e.g: 'EUR') The ISO 4217 currency code, case insensitive -o account number: string (e.g: 'BE1234567890') The number of the bank account which the statement belongs to - 'name': string (e.g: '000000123') - 'date': date (e.g: 2013-06-26) -o 'balance_start': float (e.g: 8368.56) -o 'balance_end_real': float (e.g: 8888.88) - 'transactio
ns': list of dict containing : - 'name': string (e.g: 'KBC-INVESTERINGSKREDIET 787-5562831-01') - 'date': date - 'amount': float - 'unique_import_id': string -o 'account_number': string Will be used to find/create the res.partner.bank in odoo -o 'note': string -o 'part
ner_name': string -o 'ref': string """ raise UserError(_( 'Could not make sense of the given file.\n' 'Did you install the module to support this type of file?' )) @api.model def _check_parsed_data(self, statements): # pylint: disable=no-self-use """ Basi
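For reference, a sketch of the legacy 3-tuple form of a _parse_file() return value, following the field layout documented in the docstring above (all values illustrative, borrowed from the docstring's own examples):

parse_result = (
    'EUR',                                   # currency code
    'BE1234567890',                          # account number
    [{
        'name': '000000123',
        'date': '2013-06-26',
        'balance_start': 8368.56,
        'balance_end_real': 8888.88,
        'transactions': [{
            'name': 'KBC-INVESTERINGSKREDIET 787-5562831-01',
            'date': '2013-06-26',
            'amount': -520.32,
            'unique_import_id': '000000123-0001',
        }],
    }],
)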
ebmdatalab/openprescribing
openprescribing/pipeline/migrations/0002_tasklog_formatted_tb.py
Python
mit
402
0
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-07-05 10:45
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('pipeline', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='tasklog',
            name='formatted_tb',
            field=models.TextField(null=True),
        ),
    ]
tony-rasskazov/meteo
weewx/bin/weewx/drivers/wmr200.py
Python
mit
77,782
0.002031
# # Copyright (c) 2013 Chris Manton <cmanton@gmail.com> www.onesockoff.org # See the file LICENSE.txt for your full rights. # # Special recognition to Lars de Bruin <l...@larsdebruin.net> for contributing # packet decoding code. # # pylint parameters # suppress global variable warnings # pylint: disable-msg=W0603 # suppress weewx driver methods not implemented # pylint: disable-msg=W0223 # suppress weewx driver methods non-conforming name # pylint: disable-msg=C0103 # suppress too many lines in module # pylint: disable-msg=C0302 # suppress too many instance attributes # pylint: disable-msg=R0902 # suppress too many public methods # pylint: disable-msg=R0904 # suppress too many statements # pylint: disable-msg=R0915 # suppress unused arguments e.g. loader(...,engine) # pylint: disable-msg=W0613 """Classes and functions to interfacing with an Oregon Scientific WMR200 station Oregon Scientific http://us.oregonscientific.com/ulimages/manuals2/WMR200.pdf Bronberg Weather Station For a pretty good summary of what's in these packets see http://www.bashewa.com/wmr200-protocol.php """ import select import socket import syslog import threading import time import usb import weewx.drivers import weeutil.weeutil DRIVER_NAME = 'WMR200' DRIVER_VERSION = "3.1" def loader(config_dict, engine): return WMR200(**config_dict[DRIVER_NAME]) def confeditor_loader(): return WMR200ConfEditor() # General decoding sensor maps. WIND_DIR_MAP = {0: 'N', 1: 'NNE', 2: 'NE', 3: 'ENE', 4: 'E', 5: 'ESE', 6: 'SE', 7: 'SSE', 8: 'S', 9: 'SSW', 10: 'SW', 11: 'WSW', 12: 'W', 13: 'WNW', 14: 'NW', 15: 'NNW'} FORECAST_MAP = {0: 'Partly Cloudy', 1: 'Rainy', 2: 'Cloudy', 3: 'Sunny', 4: 'Clear Night', 5: 'Snowy', 6: 'Partly Cloudy Night', 7: 'Unknown7'} TRENDS = {0: 'Stable', 1: 'Rising', 2: 'Falling', 3: 'Undefined'} # Size of USB frame to read from weather console. _WMR200_USB_FRAME_SIZE = 8 # Time to sleep in seconds between querying usb device thread # for data. This should be non-zero and reduces load on the machine. _WMR200_USB_POLL_INTERVAL = 1 # Time interval in secs to send data to the wmr200 to request live data. _WMR200_REQUEST_LIVE_DATA_INTERVAL = 30 # Time in secs to block and wait for data from the weather console device. # Related to time to request live data. _WMR200_USB_READ_DATA_INTERVAL = _WMR200_REQUEST_LIVE_DATA_INTERVAL / 2 # Time in ms to wait for USB reset to complete. _WMR200_USB_RESET_TIMEOUT = 1000 # Guessed wmr200 protocol max packet size in bytes. # This is only a screen to differentiate between good and # bad packets. _WMR200_MAX_PACKET_SIZE = 0x80 # Driver name. _WMR200_DRIVER_NAME = 'wmr200' # weewx configurable flags for enabling/disabling debug verbosity. # Prints processed packets with context from console. DEBUG_PACKETS_COOKED = 0 # Prints raw pre-processed packets from console. DEBUG_PACKETS_RAW = 0 # Prints respective packets individually. DEBUG_PACKETS_ARCHIVE = 0 DEBUG_PACKETS_PRESSURE = 0 DEBUG_PACKETS_RAIN = 0 DEBUG_PACKETS_STATUS = 0 DEBUG_PACKETS_TEMP = 0 DEBUG_PACKETS_UVI = 0 DEBUG_PACKETS_WIND = 0 # Print communication messages DEBUG_COMM = 0 # Print weather station configuration. DEBUG_CONFIG_DATA = 0 # Print all writes to weather console. 
DEBUG_WRITES = 0 DEBUG_READS = 0 DEBUG_CHECKSUM = 0 def logmsg(dst, msg): """Base syslog helper""" syslog.syslog(dst, ('%s: %s: %s' % (_WMR200_DRIVER_NAME, threading.currentThread().getName(), msg))) def logdbg(msg): """Debug syslog helper""" logmsg(syslog.LOG_DEBUG, 'D ' + msg) def loginf(msg): """Info syslog helper""" logmsg(syslog.LOG_INFO, 'I ' + msg) def logwar(msg): """Warning syslog helper""" logmsg(syslog.LOG_WARNING, 'W ' + msg) def logerr(msg): """Error syslog helper""" logmsg(syslog.LOG_ERR, 'E ' + msg) def logcrt(msg): """Critical syslog helper""" logmsg(syslog.LOG_CRIT, 'C ' + msg) class WMR200PacketParsingError(Exception): """A driver handled recoverable packet parsing error condition.""" def __init__(self, msg): super(WMR200PacketParsingError, self).__init__() self._msg = msg @property def msg(self): """Exception message to be logged to console.""" return self._msg class WMR200ProtocolError(weewx.WeeWxIOError): """Used to signal a protocol error condition""" def __init__(self, msg): super(WMR200ProtocolError, self).__init__() self._msg = msg logerr(msg) class UsbDevice(object): """General class to handles all access to device via USB bus.""" def __init__(self): # Polling read timeout. self.timeout_read = _WMR200_USB_READ_DATA_INTERVAL # USB device used for libusb self.dev = None # Holds device handle for access self.handle = None # debug byte count self.byte_cnt_rd = 0 self.byte_cnt_wr = 0 # default to a sane endpoint self.in_endpoint = usb.ENDPOINT_IN + 1 # only one interface self.interface = 0 def find_device(self, vendor_id, product_id): """Find the given vendor and product IDs on the USB bus Returns: True if specified device was found, otherwise false. """ for bus in usb.busses(): for dev in bus.devices: if dev.idVendor == vendor_id \ and dev.idProduct == product_id: self.dev = dev return True return False def open_device(self): """Opens a USB device and get a handle to read and write. A specific device must have been found.""" try: self.handle = self.dev.open() except usb.USBError, exception: logcrt(('open_device() Unable to open USB interface.' ' Reason: %s' % exception)) raise weewx.WakeupError(exception) except AttributeError, exception: logcrt('open_device() Device not specified.') raise weewx.WakeupError(exception) # Detach any old claimed interfaces try: self.handle.detachKernelDriver(self.interface) except usb.USBError: pass try: self.handle.claimInterface(self.interf
ace) except usb.USBError, exception: logcrt(('open_device() Unable to' ' claim USB interface. Reason: %s' % exception)) raise weewx.WakeupError(exception) def close_device(self): """Close a device for access. NOTE(CMM) There is no busses[].devices[].clos
e() so under linux the file descriptor will remain open for the life of the process. An OS independant mechanism is required so 'lsof' and friends will not be cross platform.""" try: self.handle.releaseInterface() except usb.USBError, exception: logcrt('close_device() Unable to' ' release device interface. Reason: %s' % exception) def read_device(self): """Read a stream of data bytes from the device. Returns a list of valid protocol bytes from the device. The first byte indicates the number of valid bytes following the first byte that are valid protocol bytes. Only the valid protocol bytes are returned. """ if not self.handle: msg = 'read_device() No USB handle for usb_device Read' logerr(msg) raise weewx.WeeWxIOError(msg) report = None try: report = self.handle.interruptRead(self.in_endpoint, _WMR200_USB_FRAME_SIZE, int(self.timeout_read) * 1000) # I think this value indicates that the buffer has overflowed. if report[0] == 8: msg = 'USB read_device overflow error' logerr(msg) raise weewx.WeeWxIOError(msg)
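A tiny worked example of the WIND_DIR_MAP table above; the console encodes wind direction as a 4-bit index at 22.5 degrees per step:

idx = 13
assert WIND_DIR_MAP[idx] == 'WNW'
assert idx * 22.5 == 292.5  # degrees clockwise from north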
s1s5/django_busybody
tests/test_models.py
Python
mit
11,615
0.001213
#!/usr/bin/env python # -*- coding: utf-8 -*- """ test_django_busybody ------------ Tests for `django_busybody` models module. """ from __future__ import unicode_literals import datetime import json import uuid from mock import patch from django.test import TestCase # from django.conf import settings from django.core.files.storage import default_storage from django.contrib.contenttypes.models import ContentType from django.core.files.base import ContentFile from django.test.client import RequestFactory from django.contrib.auth import get_user_model from django.utils import timezone import django_busybody.models as bb_models from django_busybody import easy_crypto from django_busybody.middlewares import GlobalRequestMiddleware from . import models class TestDjango_busybody(TestCase): def setUp(self): self.obj = models.EncryptTest.objects.create( without_encrypt='1', with_encrypt='1', without_encrypt_with_log='1', with_encrypt_with_log='1') def test_get(self): print("=" * 120) obj = models.EncryptTest.objects.get(pk=self.obj.pk) print("=" * 80) self.assertEqual(obj.without_encrypt, '1') self.assertEqual(obj.with_encrypt, '1') self.assertEqual(obj.without_encrypt_with_log, '1') self.assertEqual(obj.with_encrypt_with_log, '1') def test_get_and_save(self): obj = models.EncryptTest.objects.get(pk=self.obj.pk) self.assertEqual(obj.without_encrypt, '1') self.assertEqual(obj.with_encrypt, '1') self.assertEqual(obj.without_encrypt_with_log, '1') self.assertEqual(obj.with_encrypt_with_log, '1') obj.save() self.assertEqual(obj.without_encrypt, '1') self.assertEqual(obj.with_encrypt, '1') self.assertEqual(obj.without_encrypt_with_log, '1') self.assertEqual(obj.with_encrypt_with_log, '1') self.assertEqual(models.EncryptTest.objects.filter(without_encrypt__exact='1').count(), 1) self.assertEqual(models.EncryptTest.objects.filter(with_encrypt__exact='1').count(), 0) self.assertEqual(models.EncryptTest.objects.filter(without_encrypt_with_log__exact='1').count(), 1)
self.assertEqual(models.EncryptTest.objects.filter(with_encrypt_with_log__exact='1').count(), 0) def test_encrypt(self): self.assertEqual(models.EncryptTest.objects.filter(without_encrypt__exact='1').count(), 1) self.assertEqual(models.EncryptTest.objects.filter(with_encrypt__exact='1').count(), 0) self.assertEqual(models.
EncryptTest.objects.filter(without_encrypt_with_log__exact='1').count(), 1) self.assertEqual(models.EncryptTest.objects.filter(with_encrypt_with_log__exact='1').count(), 0) def test_unicode(self): obj = models.EncryptTest.objects.create( without_encrypt='日本語', with_encrypt='日本語', without_encrypt_with_log='日本語', with_encrypt_with_log='日本語') obj = models.EncryptTest.objects.get(pk=obj.pk) self.assertEqual(obj.without_encrypt, '日本語') self.assertEqual(obj.with_encrypt, '日本語') self.assertEqual(obj.without_encrypt_with_log, '日本語') self.assertEqual(obj.with_encrypt_with_log, '日本語') def test_invalid_decrypt(self): models.EncryptTest.objects.filter(pk=self.obj.pk).update(with_encrypt='no_encrypt') self.assertEqual(models.EncryptTest.objects.filter(with_encrypt__exact='no_encrypt').count(), 1) obj = models.EncryptTest.objects.get(pk=self.obj.pk) self.assertEqual(obj.with_encrypt, 'no_encrypt') def test_invalid_decrypt2(self): models.EncryptTest.objects.filter(pk=self.obj.pk).update(with_encrypt='日本語') self.assertEqual(models.EncryptTest.objects.filter(with_encrypt__exact='日本語').count(), 1) obj = models.EncryptTest.objects.get(pk=self.obj.pk) self.assertEqual(obj.with_encrypt, '日本語') def test_invalid_decrypt3(self): import base64 from Crypto.Cipher import AES iv = b'\xf2\xae' * 8 raw = '日本語' * 16 cipher = AES.new(easy_crypto._cipher.key, AES.MODE_CBC, iv) value = base64.b64encode(iv + cipher.encrypt(raw.encode('utf-8'))) models.EncryptTest.objects.filter(pk=self.obj.pk).update(with_encrypt=value) models.EncryptTest.objects.get(pk=self.obj.pk) def tearDown(self): models.EncryptTest.objects.get(pk=self.obj.pk).delete() class TestDjango_history(TestCase): def setUp(self): self.user = get_user_model().objects.create(username='test') self.obj = models.AllField.objects.create( big_integer=0, binary=b"", boolean=True, char="", date=timezone.now(), date_time=timezone.now(), decimal=0, duration=datetime.timedelta(seconds=1), email="hoge@email.com", _file=default_storage.save("hello.txt", ContentFile("hello world")), file_path="hoge.txt", _float=0.0, integer=0, generic_ip_address="0.0.0.0", null_boolean=None, positive_integer=1, positive_small_integer=1, slug="slug", small_integer=0, text="text", time=timezone.now(), url="http://hoge.com", uuid=uuid.uuid4().hex, foreign_key=self.user, one_to_one=self.user) @property def latest_history(self): return bb_models.History.objects.all().order_by('-changed_at')[0] def test_history_bool(self): obj = models.AllField.objects.get(pk=self.obj.pk) obj.boolean = False obj.null_boolean = True obj.save() def test_history_integer(self): obj = models.AllField.objects.get(pk=self.obj.pk) obj.big_integer = 10 obj.decimal = 10 obj._float = 0.1 obj.integer = 10 obj.positive_integer = 10 obj.positive_small_integer = 10 obj.small_integer = 1 obj.save() def test_history_binary(self): obj = models.AllField.objects.get(pk=self.obj.pk) obj.binary = b"binary_value" obj.save() def test_history_string(self): obj = models.AllField.objects.get(pk=self.obj.pk) obj.char = "char" obj.email = "hoge2@email.com" obj.file_path = "hoge2.txt" obj.generic_ip_address = "0.0.0.1" obj.slug = "slug1" obj.text = "text1" obj.url = "http://hoge1.com" obj.uuid = uuid.uuid4().hex obj.save() def test_history_datetime(self): obj = models.AllField.objects.get(pk=self.obj.pk) obj.date = timezone.now() obj.date_time = timezone.now() obj.duration = datetime.timedelta(seconds=2) obj.time = timezone.now() obj.save() def test_history_file(self): obj = models.AllField.objects.get(pk=self.obj.pk) 
obj._file.save("hello2.txt", ContentFile("hello world2"), save=True) obj2 = models.AllField.objects.create() obj2._file.save("hello2.txt", ContentFile("hello world2"), save=True) obj2 = models.AllField.objects.create() obj2._file.save("hello2.txt", ContentFile("hello world2"), save=True) def test_history_key(self): new_user = get_user_model().objects.create(username='test2') obj = models.AllField.objects.get(pk=self.obj.pk) obj.foreign_key = new_user obj.one_to_one = new_user obj.save() class TestDjango_history_encrypt(TestCase): def setUp(self): self.obj = models.EncryptTest.objects.create( without_encrypt='1', with_encrypt='1', without_encrypt_with_log='1', with_encrypt_with_log='1') def tearDown(self): bb_models.History.objects.all().delete() models.EncryptTest.objects.get(pk=self.obj.pk).delete() def check_history(self, obj, key='without_encrypt_with_log'): history = bb_models.History.objects.filte
ricardodeazambuja/BrianConnectUDP
examples/OutputNeuronGroup_multiple_inputs.py
Python
cc0-1.0
2,862
0.014675
''' Example of a spike receptor (only receives spikes) In this example, spikes are received and processed, creating a raster plot at the end of the s
imulation. ''' from brian import * import numpy from brian_multiprocess_udp import BrianConnectUDP # The main function with the NeuronGroup(s) and Synapse(s) must be named "main_NeuronGroup". # It will receive two objects: input_Neuron_Group and the simulation_clock. The input_Neuron_Group # will supply the input spikes to the network. The size of the spike train received equals NumOfNeuronsInput. # The size of the output spike train equals NumOfNeuronsOutput and
must be the same size of the NeuronGroup who is # going to interface with the rest of the system to send spikes. # The function must return all the NeuronGroup objects and all the Synapse objects this way: # ([list of all NeuronGroups],[list of all Synapses]) # and the FIRST (index 0) NeuronGroup of the list MUST be the one where the OUTPUT spikes will be taken by the simulation. # # Here is also possible to use "dummy" NeuronGroups only to receive and/or send spikes. my_neuron_input_number = 100 def main_NeuronGroup(input_Neuron_Group, simulation_clock): print "main_NeuronGroup!" #DEBUG! simclock = simulation_clock Nr=NeuronGroup(my_neuron_input_number, model='v:1', reset=0, threshold=0.5, clock=simclock) Nr.v=0 # SYNAPSES BETWEEN REAL NEURON NETWORK AND THE INPUT Syn_iNG_Nr=Synapses(input_Neuron_Group, Nr, model='w:1', pre='v+=w', clock=simclock) Syn_iNG_Nr[:,:]='i==j' print "Total Number of Synapses:", len(Syn_iNG_Nr) #DEBUG! Syn_iNG_Nr.w=1 MExt=SpikeMonitor(Nr) # Spikes sent by UDP Mdummy=SpikeMonitor(input_Neuron_Group) # Spikes received by UDP return ([Nr],[Syn_iNG_Nr],[MExt,Mdummy]) def post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN): """ input_NG: the neuron group that receives the input spikes simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup) simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup) simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup) This way it is possible to plot, save or do whatever you want with these objects after the end of the simulation! """ pass figure() raster_plot(simulation_MN[1]) title("Spikes Received by UDP") show(block=True) # savefig('output.pdf') if __name__=="__main__": my_simulation = BrianConnectUDP(main_NeuronGroup, NumOfNeuronsInput=my_neuron_input_number, post_simulation_function=post_simulation_function, input_addresses=[("127.0.0.1", 14141, 40),("127.0.0.1", 16161, 60)], simclock_dt=1, inputclock_dt=2, TotalSimulationTime=10000, sim_repetitions=0, brian_address=2)
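The comment block in this entry pins down a contract for main_NeuronGroup: return ([NeuronGroups], [Synapses], [Monitors]) with the output group at index 0 of the first list. A minimal conforming sketch, reusing only the Brian 1 calls already present in the file (the relay size is an assumed placeholder that must match NumOfNeuronsInput):

from brian import NeuronGroup, Synapses, SpikeMonitor

N_INPUT = 100  # assumed: must equal NumOfNeuronsInput passed to BrianConnectUDP

def main_NeuronGroup(input_Neuron_Group, simulation_clock):
    # One relay neuron per input neuron; fires when its input fires.
    relay = NeuronGroup(N_INPUT, model='v:1', reset=0, threshold=0.5,
                        clock=simulation_clock)
    relay.v = 0
    syn = Synapses(input_Neuron_Group, relay, model='w:1', pre='v+=w',
                   clock=simulation_clock)
    syn[:, :] = 'i==j'  # one-to-one wiring, as in the example above
    syn.w = 1
    # Index 0 of the first list is the group whose spikes go out over UDP.
    return ([relay], [syn], [SpikeMonitor(relay)])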
JShadowMan/package
python/multi-process-thread/multiprocess.py
Python
mit
660
0.019697
#!/usr/bin/env python3 from multiprocessing import Process, Pool import os, time def proc(name): print(time.asctime(), 'child process(name: %s) id %s. ppid %s' % (name, os.getpid(), os.getppid(
))) time.sleep(3) print(time.asctime(), 'child process end') if __name__ == '__main__': p = Process(target = proc, args = ('child',)) print(time.asctime(), 'child process will start') p.start() p.join() print('first child process end') pl = Pool(4) f
or index in range(4): pl.apply_async(proc, args = (index,)) pl.close() pl.join() print(time.asctime(), 'parent process end')
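The Pool section above fires apply_async and discards the AsyncResult handles, so any return values are lost. A small stdlib-only sketch of the same call that keeps the handles and collects results (the square worker is illustrative):

from multiprocessing import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    with Pool(4) as pool:
        handles = [pool.apply_async(square, (i,)) for i in range(4)]
        print([h.get(timeout=10) for h in handles])  # -> [0, 1, 4, 9]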
mcallaghan/tmv
BasicBrowser/scoping/migrations/0129_auto_20170815_0946.py
Python
gpl-3.0
1,435
0.003484
# -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-08-15 09:46 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependenci
es = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('scoping', '0128_auto_20170808_0954'), ] operations = [
migrations.CreateModel( name='ProjectRoles', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('role', models.CharField(choices=[('OW', 'Owner'), ('AD', 'Admin'), ('RE', 'Reviewer'), ('VE', 'Viewer')], max_length=2)), ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.RemoveField( model_name='project', name='owner', ), migrations.AddField( model_name='projectroles', name='project', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scoping.Project'), ), migrations.AddField( model_name='project', name='users', field=models.ManyToManyField(through='scoping.ProjectRoles', to=settings.AUTH_USER_MODEL), ), ]
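Because this migration replaces Project.owner with a users many-to-many routed through ProjectRoles, membership rows are created on the through model directly rather than via project.users.add(). A hedged usage sketch; the import path is assumed from the app label, and note the user foreign key on the through model is named owner:

from scoping.models import Project, ProjectRoles  # assumed import path

def grant_role(project, user, role='RE'):
    # role must be one of the choices above: 'OW', 'AD', 'RE' or 'VE'.
    return ProjectRoles.objects.create(project=project, owner=user, role=role)

def reviewers(project):
    return [pr.owner for pr in
            ProjectRoles.objects.filter(project=project, role='RE')]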
ParkJinSang/SIMONFramework
src/SIMON_Py/SIMON/algorithms/SIMONAlgorithmMain.py
Python
apache-2.0
1,860
0.003763
__author__ = 'PARKJINSANG' from SIMON.algorithms.genetic.SIMONGeneticAlgorithm import SIMONGeneticAlgorithm # # genetic algorithm for learning and evaluating # # def run_genetic_algorithm(group, actionPool, propertyPool): algorithm = SIMONGeneticAlgorithm() for actionName, actionDnaList in actionPool.items(): if(len(actionDnaList) < 1): continue selectedPool = algorithm.selection_action(group, actionName, actionDnaList) crossedPool = algorithm
.crossover_action(selectedPool) mutatedPool = algorithm.mutation_action(crossedPool) update_action(group, actionName, mutatedPo
ol) selectedPool = algorithm.selection_property(group, propertyPool) crossedPool = algorithm.crossover_property(selectedPool) mutatedPool = algorithm.mutation_property(crossedPool) update_property(group, mutatedPool) # # update action dna in the group # # def update_action(group, actionName, actionPool=None): import random for element in group: if(len(actionPool)-1 < 0): continue evolve_idx = random.randint(0, len(actionPool)-1) if(element.Actions.__contains__(actionName)): element.Actions[actionName].ActionDNA = actionPool[evolve_idx] # # update property dna in the group # # def update_property(group, propertyPool=None): import random for prop_list in propertyPool: for element in group: if(len(prop_list)-1 < 0): continue update_idx = random.randint(0, len(prop_list)-1) for key_prop_list, element_prop_list in prop_list[update_idx].items(): if(element.PropertyDNA.__contains__(key_prop_list)): element.PropertyDNA[key_prop_list] = element_prop_list element.Properties[key_prop_list] = element_prop_list
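The module above runs a select, crossover, mutate, update cycle per action and per property pool. The SIMON types are not available here, so the following self-contained illustration uses plain bit lists as stand-in DNA; everything in it is illustrative, not SIMON's API:

import random

def crossover(a, b):
    cut = random.randint(1, len(a) - 1)
    return a[:cut] + b[cut:]

def mutate(dna, rate=0.1):
    return [(1 - g) if random.random() < rate else g for g in dna]

pool = [[random.randint(0, 1) for _ in range(8)] for _ in range(6)]
parents = random.sample(pool, 2)       # selection (here: uniform)
child = mutate(crossover(*parents))    # crossover followed by mutation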
EdwardJKim/nbgrader
nbgrader/preprocessors/clearsolutions.py
Python
bsd-3-clause
5,142
0.000778
from traitlets import Unicode, Bool from textwrap import dedent from .. import utils from . import NbGraderPreprocessor class ClearSolutions(NbGraderPreprocessor): code_stub = Unicode( "# YOUR CODE HERE\nraise NotImplementedError()", config=True, help="The code snippet that will replace code solutions") text_stub = Unicode( "YOUR ANSWER HERE", config=True, help="The text snippet that will replace written solutions") comment_mark = Unicode( "#", config=True, help="The comment mark to prefix solution delimiters") begin_solution_delimeter = Unicode( "## BEGIN SOLUTION", config=True, help="The delimiter marking the beginning of a solution (excluding comment mark)") end_solution_delimeter = Unicode( "## END SOLUTION", config=True, help="The delimiter marking the end of a solution (excluding comment mark)") enforce_metadata = Bool( True, config=True, help=dedent( """ Whether or not to complain if cells containing solutions regions are not marked as solution cells. WARNING: this will potentially cause things to break if you are using the full nbgrader pipeline. ONLY disable this option if you are only ever planning to use nbgrader assign. """ ) ) @property def begin_solution(self): return "{}{}".format(self.comment_mark, self.begin_solution_delimeter) @property def end_solution(self): return "{}{}".format(self.comment_mark, self.end_solution_delimeter) def _replace_solution_region(self, cell): """Find a region in the cell that is delimeted by `self.begin_solution` and `self.end_solution` (e.g. ### BEGIN SOLUTION and ### END SOLUTION). Replace that region either with the code stub or text stub, depending the cell type. This modifies the cell in place, and then returns True if a solution region was replaced, and False otherwise. """ # pull out the cell input/source lines = cell.source.split("\n") if cell.cell_type == "code": stub_lines = self.code_stub.split("\n") else: stub_lines = self.te
xt_stub.split("\n") new_lines = [] in_solution = False replaced_solution = False for line in lines: # begin the solution area if line.strip() == self.begin_solution: # check to
make sure this isn't a nested BEGIN # SOLUTION region if in_solution: raise RuntimeError( "encountered nested begin solution statements") in_solution = True replaced_solution = True # replace it with the stub, indented as necessary indent = line[:line.find(self.begin_solution)] for stub_line in stub_lines: new_lines.append(indent + stub_line) # end the solution area elif line.strip() == self.end_solution: in_solution = False # add lines as long as it's not in the solution area elif not in_solution: new_lines.append(line) # we finished going through all the lines, but didn't find a # matching END SOLUTION statment if in_solution: raise RuntimeError("no end solution statement found") # replace the cell source cell.source = "\n".join(new_lines) return replaced_solution def preprocess(self, nb, resources): nb, resources = super(ClearSolutions, self).preprocess(nb, resources) if 'celltoolbar' in nb.metadata: del nb.metadata['celltoolbar'] return nb, resources def preprocess_cell(self, cell, resources, cell_index): # replace solution regions with the relevant stubs replaced_solution = self._replace_solution_region(cell) # determine whether the cell is a solution/grade cell is_solution = utils.is_solution(cell) # check that it is marked as a solution cell if we replaced a solution # region -- if it's not, then this is a problem, because the cell needs # to be given an id if not is_solution and replaced_solution: if self.enforce_metadata: raise RuntimeError( "Solution region detected in a non-solution cell; please make sure " "all solution regions are within solution cells." ) # replace solution cells with the code/text stub -- but not if # we already replaced a solution region, because that means # there are parts of the cells that should be preserved if is_solution and not replaced_solution: if cell.cell_type == 'code': cell.source = self.code_stub else: cell.source = self.text_stub return cell, resources
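A concrete before/after for _replace_solution_region with the defaults declared above (comment_mark "#" plus "## BEGIN SOLUTION" yields the "### BEGIN SOLUTION" marker the docstring mentions). The stub is re-indented with whatever whitespace preceded the BEGIN marker, which is why the replacement keeps the four-space indent:

before = '''def add(a, b):
    ### BEGIN SOLUTION
    return a + b
    ### END SOLUTION
'''

after = '''def add(a, b):
    # YOUR CODE HERE
    raise NotImplementedError()
'''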
semitki/canales
fileupload/urls.py
Python
mit
514
0.001946
# encoding: utf-8 from django.conf.urls i
mport url from fileupload.views import ( BasicVersionCreat
eView, BasicPlusVersionCreateView, jQueryVersionCreateView, AngularVersionCreateView, PictureCreateView, PictureDeleteView, PictureListView, ) urlpatterns = [ url(r'^new/$', PictureCreateView.as_view(), name='upload-new'), url(r'^delete/(?P<pk>\d+)$', PictureDeleteView.as_view(), name='upload-delete'), url(r'^view/$', PictureListView.as_view(), name='upload-view'), ]
akvo/akvo-rsr
akvo/rsr/migrations/0091_auto_20170208_1035.py
Python
agpl-3.0
2,382
0.001679
# -*- coding: utf-8 -*- from django.db import models, migrations def indicator_links(apps, schema_editor): """ Migration generating foreign keys from indicators and indicator periods in child results frameworks to parents of the same object type in the parent results framework """ Result = apps.get_model('rsr', 'Result') Indicator = apps.get_model('rsr', 'Indicator') IndicatorPeriod = apps.get_model('rsr', 'IndicatorPeriod') parent_results = Result.objects.annotate( children=models.Count('child_results') ).exclude(children=0) for result in parent_results: child_results = result.child_results.all() # Find all indicators for the current Result parent_indicators = Indicator.objects.filter(result=result) for parent_indicator in parent_indicators: # Child indicators have the same title etc and the parent indicator, and a result that's # a child of the current result child_indicators = Indicator.objects.filter( result__in=child_results, title=parent_indicator.title, measure=parent_indicator.measure, ascending=parent_indicator.ascending ) # Set FK in child indicators to parent indicator for child_indicator in child_indicators:
child_indicator.parent_indic
ator = parent_indicator # basic saving only super(Indicator, child_indicator).save() # Same pattern applies to IndicatorPeriods parent_periods = IndicatorPeriod.objects.filter(indicator__result=result) for parent_period in parent_periods: child_periods = IndicatorPeriod.objects.filter( indicator__result__in=child_results, indicator__title=parent_period.indicator.title, period_start=parent_period.period_start, period_end=parent_period.period_end ) for child_period in child_periods: child_period.parent_period = parent_period super(IndicatorPeriod, child_period).save() class Migration(migrations.Migration): dependencies = [ ('rsr', '0090_auto_20170207_2235'), ] operations = [ migrations.RunPython(indicator_links, reverse_code=lambda x, y: None), ]
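The two-argument super(...).save() calls above are what let the migration write rows without triggering any custom save() logic on the real models (historical models from apps.get_model() carry no custom methods anyway). The mechanism in isolation, with illustrative classes:

class Base(object):
    def save(self):
        return "plain save"

class Child(Base):
    def save(self):
        return "custom save with side effects"

obj = Child()
assert obj.save() == "custom save with side effects"
# Same trick as super(Indicator, child_indicator).save() above:
assert super(Child, obj).save() == "plain save"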
eduNEXT/edunext-platform
import_shims/lms/experiments/stable_bucketing.py
Python
agpl-3.0
407
0.009828
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh.""" # pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long from import_shims.warn import warn_deprecated_impo
rt warn_deprecated_import('experiments.stable_bucketing', 'lms.djangoapps.experiments.stable_bucketing') from lms.dja
ngoapps.experiments.stable_bucketing import *
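The body of warn_deprecated_import is not shown in this entry; a deprecation warner of the following shape is the usual pattern. This is a sketch only, not edunext-platform's actual implementation:

import warnings

def warn_deprecated_import(old_path, new_path):
    warnings.warn(
        "Importing {} is deprecated; import {} instead.".format(old_path, new_path),
        DeprecationWarning,
        stacklevel=3,  # blame the importing module, not the shim
    )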
mshuffett/MetaPyMusic
playlister.py
Python
gpl-2.0
1,557
0.001285
import os import cPickle as pkl from collections import namedtuple import requests from bs4 import BeautifulSoup Song = namedtuple('Song', ['title', 'artist', 'album', 'length']) class Playlist(object): def __init__(se
lf, title, url): self.title = title self.file_name = title.lower().replace(' ', '-') + '.pkl' self.url = url if os.path.isfile(self.file_name): self.load_from_pickle() else: self.songs = [] def load_from_pickle(self): with open(self.file_name, 'rb') as in_file: self.songs = pkl.load(in_file) def download_data(self): url = self.url resp = requests.g
et(url) soup = BeautifulSoup(resp.text) for song_elem in (soup.find(class_='songs') .find_all(class_='media-body')): title = song_elem.h4.text ps = song_elem.find_all('p') artist, album = ps[0].text.split(u' \xb7 ') length = ps[1].text song = Song(title, artist, album, length) self.songs.append(song) with open(self.file_name, 'wb') as out: pkl.dump(self.songs, out) ambient_bass = Playlist( 'ambient bass', 'http://www.playlister.io/items/playlist/1472493/ambient-bass/#') beats = Playlist( 'Blissed-Out Beats', 'http://www.playlister.io/items/playlist/1682151/') liquid = Playlist( 'Liquid Dubstep', 'http://www.playlister.io/items/playlist/1404323/') liquid.download_data()
jayceyxc/hue
apps/about/src/about/tests.py
Python
apache-2.0
2,701
0.007775
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json f
rom django.contrib.auth.models import User from django.core.urlresolvers import reverse from nose.tools import assert_true, assert_false, assert_equal from desktop.lib.django_test_util import make_logged_in_client from desktop.lib.test_utils import grant_access from desktop.models import Settings from oozie.tests import OozieBase class TestAboutBase(object): def setUp(self): self.client = make_logged_in_client(username="about", is_superuser=False) grant_access("about",
"about", "about") self.client_admin = make_logged_in_client(username="about_admin", is_superuser=True) grant_access("about_admin", "about_admin", "about") class TestAbout(TestAboutBase, OozieBase): def test_admin_wizard_permissions(self): response = self.client_admin.get(reverse('about:index')) assert_true('Check Configuration' in response.content, response.content) response = self.client.get(reverse('about:index')) assert_false('Check Configuration' in response.content, response.content) class TestAboutWithNoCluster(TestAboutBase): def test_dump_config(self): # Exception raised if bad unicode self.client_admin.get(reverse('about:index'), HTTP_ACCEPT_LANGUAGE='fr-fr') def test_collect_usage(self): collect_usage = Settings.get_settings().collect_usage try: response = self.client.post(reverse('about:update_preferences'), {'collect_usage': False}) data = json.loads(response.content) assert_equal(data['status'], 0) assert_false(data['collect_usage'] == True) # Weird but works response = self.client.post(reverse('about:update_preferences'), {'collect_usage': True}) data = json.loads(response.content) assert_equal(data['status'], 0) assert_true(data['collect_usage']) finally: settings = Settings.get_settings() settings.collect_usage = collect_usage settings.save()
Thortoise/Super-Snake
Blender/animation_nodes-master/nodes/number/float_range_list.py
Python
gpl-3.0
1,324
0.009819
import bpy from bpy.props import * from ... sockets.info import toListDataType from ... base_types.node import AnimationNode class FloatRangeListNode(bpy.types.Node, AnimationNode): bl_idname = "an_FloatRangeListNode" bl_label = "Number Range" dynamicLabelType = "ALWAYS" onlySearchTags = True searchTags = [ ("Float Range", {"dataType" : repr("Float")}), ("Integer Range", {"dataType" : repr("Integer")}) ] def dataTypeChanged(self, context): self.generateSockets() dataType = StringProperty(default = "Float", update = dataTypeChanged) def create(self): self.generateSockets() def drawLabel(self): return self.inputs[1].dataType + " Range" def generateSockets(self): self.inputs.clear() self.outputs.clear() self.newInput("Integer", "Amount", "amount", value = 5) self.newInput(self.dataType, "Start", "start
") self.newInput(self.dataType, "Step", "step", value = 1) self.newOutput(toListDataType(self.dataType), "List", "list") def getExecutionCode(self): if self.dataType == "Float": return "list = [start + i * step for i in
range(amount)]" if self.dataType == "Integer": return "list = [int(start + i * step) for i in range(amount)]"
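getExecutionCode() returns source strings that animation_nodes executes with the socket identifiers (amount, start, step, list) bound as variables; evaluated as plain Python, the two branches behave like this:

amount, start, step = 5, 2.0, 0.5
float_list = [start + i * step for i in range(amount)]     # "Float" branch
int_list = [int(start + i * step) for i in range(amount)]  # "Integer" branch
assert float_list == [2.0, 2.5, 3.0, 3.5, 4.0]
assert int_list == [2, 2, 3, 3, 4]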
owlabs/incubator-airflow
tests/contrib/hooks/test_gcp_video_intelligence_hook.py
Python
apache-2.0
3,290
0.002736
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import unittest from airflow.contrib.hooks.gcp_video_intelligence_hook import CloudVideoIntelligenceHook from google.cloud.videointelligence_v1 import enums from tests.contrib.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id from tests.compat import mock INPUT_URI = "gs://bucket-name/input-file" OUTPUT_URI = "gs://bucket-name/output-file" FEATURES = [enums.Feature.LABEL_DETECTION] ANNOTATE_VIDEO_RESPONSE = {'test': 'test'} class CloudVideoIntelligenceHookTestCase(unittest.TestCase): def setUp(self): with mock.patch( "airflow.contrib.hooks.gcp_video_intelligence_hook.CloudVideoIntelligenceHook.__init__", new=mock_base_gcp_hook_default_project_id, ): self.hook = CloudVideoIntelligenceHook(gcp_conn_id="test") @mock.patch("airflow.contrib.hooks.gcp_video_intelligence_hook.CloudVideoIntelligenceHook.get_conn") def test_annotate_video(self, get_conn): # Given annotate_video_method = get_conn.return_value.annotate_video get_conn.return_value.annotate_video.return_value = ANNOTATE_VIDEO_RESPONSE # When result = self.hook.annotate_video(input_uri=INPUT_URI, features=FEATURES) # Then self.assertIs(result, ANNOTATE_VIDEO_RESPONSE) annotate_video_method.assert_called_once_with( input_uri=INPUT_URI, input_content=None, features=FEATURES, video_context=None, output_uri=None, location_id=None, retry=None, timeout=None, metadata=None, ) @mock.patch("airflow.contrib.hooks.gcp_video_intelligence_hook.CloudVideoIntelligenceHook.get_conn") def test_annotate_video_with_output_uri(self, get_conn): # Given annotate_video_method = get_conn.return_value.annotate_video get_conn.return_value.annotate_video.return_value = ANNOTATE_VIDEO_RESPONSE # When result = self.hook.annotate_video(input_uri=INPUT_URI, output_uri=OUTPUT_URI, features=FEATURES) # Then self.assertIs(result, ANNOTATE_VIDEO_RESPONSE) annotate_video_method.assert_called_once_with( input_uri=INPUT_URI, output_uri=OUTPUT_URI, input_content=None, features=FEATURES, video_context
=None,
location_id=None, retry=None, timeout=None, metadata=None, )
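The test pattern above (patch get_conn, configure return_value chains, assert on exact call kwargs) works for any hook-style class. A self-contained sketch using nothing but unittest.mock; FakeHook and its methods are illustrative:

from unittest import mock

class FakeHook(object):
    def get_conn(self):
        raise RuntimeError("replaced by the mock in tests")

    def annotate(self, uri):
        return self.get_conn().annotate_video(input_uri=uri)

with mock.patch.object(FakeHook, "get_conn") as get_conn:
    get_conn.return_value.annotate_video.return_value = {"test": "test"}
    assert FakeHook().annotate("gs://bucket/file") == {"test": "test"}
    get_conn.return_value.annotate_video.assert_called_once_with(
        input_uri="gs://bucket/file")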
wagnerand/zamboni
mkt/developers/urls.py
Python
bsd-3-clause
8,583
0.001864
from django import http from django.conf.urls import include, patterns, url from rest_framework.routers import SimpleRouter from lib.misc.urlconf_decorator import decorate import amo from amo.decorators import write from amo.urlresolvers import reverse from mkt.api.base import SubRouter from mkt.developers.api import ContentRatingList, ContentRatingsPingback from mkt.developers.api_payments import ( AddonPaymentAccountViewSet, PaymentAccountViewSet, PaymentCheckViewSet, PaymentDebugViewSet, PaymentViewSet, UpsellViewSet) from mkt.developers.decorators import use_apps from mkt.receipts.urls import test_patterns from . import views from . import views_payments def provider_patterns(prefix): return patterns('', url('^accounts$', views_payments.payment_accounts, name='mkt.developers.%s.payment_accounts' % prefix), url('^accounts/form$', views_payments.payment_accounts_form, name='mkt.developers.%s.payment_accounts_form' % prefix), url('^accounts/add$', views_payments.payments_accounts_add, name='mkt.developers.%s.add_payment_account' % prefix), url('^accounts/(?P<id>\d+)/delete$', views_payments.payments_accounts_delete, name='mkt.developers.%s.delete_payment_account' % prefix), url('^accounts/(?P<id>\d+)$', views_payments.payments_account, name='mkt.developers.%s.payment_account' % prefix), url('^accounts/(?P<id>\d+)/agreement/$', views_payments.agreement, name='mkt.developers.%s.agreement' % prefix) ) # These will all start with /app/<app_slug>/ app_detail_patterns = patterns('', # Redirect people who go to / instead of /edit. ('^$', lambda r, app_slug: http.HttpResponseRedirect( reverse('mkt.developers.apps.edit', args=[app_slug]))), url('^edit$', views.edit, name='mkt.developers.apps.edit'), url('^edit_(?P<section>[^/]+)(?:/(?P<editable>[^/]+))?$', views.addons_section, name='mkt.developers.apps.section'), url('^refresh_manifest$', views.refresh_manifest, name='mkt.developers.apps.ref
resh_manifest'), url('^ownership$', views.ownership, name='mkt.developers.apps.owner'), url('^enable$', views.enable, name='mkt.developers.apps.enable'), url('^delete$', views.delete, name='mkt.developers
.apps.delete'), url('^disable$', views.disable, name='mkt.developers.apps.disable'), url('^publicise$', views.publicise, name='mkt.developers.apps.publicise'), url('^status$', views.status, name='mkt.developers.apps.versions'), url('^blocklist$', views.blocklist, name='mkt.developers.apps.blocklist'), # IARC content ratings. url('^content_ratings$', views.content_ratings, name='mkt.developers.apps.ratings'), url('^content_ratings/edit$', views.content_ratings_edit, name='mkt.developers.apps.ratings_edit'), url('^status/preload$', views.preload_home, name='mkt.developers.apps.preload_home'), url('^status/preload/submit$', views.preload_submit, name='mkt.developers.apps.preload_submit'), # TODO: '^versions/$' url('^versions/(?P<version_id>\d+)$', views.version_edit, name='mkt.developers.apps.versions.edit'), url('^versions/delete$', views.version_delete, name='mkt.developers.apps.versions.delete'), url('^versions/publicise$', views.version_publicise, name='mkt.developers.apps.versions.publicise'), url('^payments/$', views_payments.payments, name='mkt.developers.apps.payments'), url('^payments/disable$', views_payments.disable_payments, name='mkt.developers.apps.payments.disable'), url('^payments/bango-portal$', views_payments.bango_portal_from_addon, name='mkt.developers.apps.payments.bango_portal_from_addon'), # in-app payments. url('^in-app-config/$', views_payments.in_app_config, name='mkt.developers.apps.in_app_config'), url('^in-app-secret/$', views_payments.in_app_secret, name='mkt.developers.apps.in_app_secret'), # Old stuff. url('^upload_preview$', views.upload_media, {'upload_type': 'preview'}, name='mkt.developers.apps.upload_preview'), url('^upload_icon$', views.upload_media, {'upload_type': 'icon'}, name='mkt.developers.apps.upload_icon'), url('^upload_image$', views.upload_media, {'upload_type': 'image'}, name='mkt.developers.apps.upload_image'), url('^rmlocale$', views.remove_locale, name='mkt.developers.apps.remove-locale'), # Not apps-specific (yet). url('^file/(?P<file_id>[^/]+)/validation$', views.file_validation, name='mkt.developers.apps.file_validation'), url('^file/(?P<file_id>[^/]+)/validation.json$', views.json_file_validation, name='mkt.developers.apps.json_file_validation'), url('^upload$', views.upload_for_addon, name='mkt.developers.upload_for_addon'), url('^upload/(?P<uuid>[^/]+)$', views.upload_detail_for_addon, name='mkt.developers.upload_detail_for_addon'), ) # These will all start with /ajax/app/<app_slug>/ ajax_patterns = patterns('', url('^image/status$', views.image_status, name='mkt.developers.apps.ajax.image.status'), ) urlpatterns = decorate(write, patterns('', # Redirect people who have /apps/ instead of /app/. ('^apps/\d+/.*', lambda r: http.HttpResponseRedirect(r.path.replace('apps', 'app', 1))), # Standalone validator: url('^validator/?$', views.validate_addon, name='mkt.developers.validate_addon'), # Redirect to /addons/ at the base. 
url('^submissions$', use_apps(views.dashboard), name='mkt.developers.apps'), url('^upload$', views.upload_new, name='mkt.developers.upload'), url('^upload/([^/]+)(?:/([^/]+))?$', views.upload_detail, name='mkt.developers.upload_detail'), url('^standalone-hosted-upload$', views.standalone_hosted_upload, name='mkt.developers.standalone_hosted_upload'), url('^standalone-packaged-upload$', views.standalone_packaged_upload, name='mkt.developers.standalone_packaged_upload'), url('^standalone-(hosted|packaged)-upload/([^/]+)$', views.standalone_upload_detail, name='mkt.developers.standalone_upload_detail'), # Standalone tools. url('^upload-manifest$', views.upload_manifest, name='mkt.developers.upload_manifest'), url('^in-app-keys/$', views_payments.in_app_keys, name='mkt.developers.apps.in_app_keys'), url('^in-app-key-secret/([^/]+)$', views_payments.in_app_key_secret, name='mkt.developers.apps.in_app_key_secret'), # URLs for a single app. url('^app/%s/' % amo.APP_SLUG, include(app_detail_patterns)), url('^ajax/app/%s/' % amo.APP_SLUG, include(ajax_patterns)), url('^terms$', views.terms, name='mkt.developers.apps.terms'), url('^api$', views.api, name='mkt.developers.apps.api'), # Developer docs url('docs/(?P<doc_name>[-_\w]+)?$', views.docs, name='mkt.developers.docs'), url('docs/(?P<doc_name>[-_\w]+)/(?P<doc_page>[-_\w]+)', views.docs, name='mkt.developers.docs'), url('^transactions/', views.transactions, name='mkt.developers.transactions'), # Bango-specific stuff. url('^provider/', include(provider_patterns('provider'))), url('^test/$', views.testing, name='mkt.developers.apps.testing'), url('^test/receipts/', include(test_patterns)), )) api_payments = SimpleRouter() api_payments.register(r'account', PaymentAccountViewSet, base_name='payment-account') api_payments.register(r'upsell', UpsellViewSet, base_name='app-upsell') api_payments.register(r'app', AddonPaymentAccountViewSet, base_name='app-payment-account') app_payments = SubRouter() app_payments.register(r'payments', PaymentViewSet, base_name='app-payments') app_payments.register(r'payments/status', PaymentCheckViewSet, base_name='app-payments-status') app_payments.register(r'payments/debug', PaymentDebugViewSet, base_name='app-payments-debug') payments_api_patterns = patterns('', ur
smithsane/openstack-env
openstack_env/openstack.py
Python
bsd-3-clause
3,248
0
# Copyright (c) 2015, Artem Osadchyi # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import glanceclient as glance_client from keystoneclient.v2_0 import client as identity_client from novaclient import client as compute_client from saharaclient.api import client as data_processing_client GLANCE_VERSION = 1 NOVA_VERSION = 2 def client(credentials): return OpenStack(credentials) def identity(credentials): return identity_client.Client( username=credentials.user_name, password=credentials.password, tenant_name=credentials.tenant, auth_url=credentials.auth_url, ) def compute(credentials): return compute_client.Client( version=NOVA_VERSION, username=credentials.user_name, api_key=credentials.password, project_id=credentials.tenant, auth_url=credentials.auth_url, ) def images(credentials): return glance_client.Client( version=GLANCE_VERSION, endpoint=_get_url("image", credentials), token=credentials.auth_token, ) def data_processing(credentials): sahara_url
= _get_url("data-processing", credentials) sahara_url += "/" + credentials.tenant_id return
data_processing_client.Client( input_auth_token=credentials.auth_token, project_name=credentials.tenant, sahara_url=sahara_url, ) def _get_url(service_type, credentials): i_client = identity(credentials) service = i_client.services.find(type=service_type) endpoint = i_client.endpoints.find(service_id=service.id) return endpoint.publicurl class OpenStack(object): def __init__(self, credentials): self._credentials = credentials self._compute = None self._images = None self._identity = None self._data_processing = None self._auth_token = None @property def compute(self): if not self._compute: self._compute = compute(self._credentials) return self._compute @property def images(self): if not self._images: self._images = images(self._credentials) return self._images @property def identity(self): if not self._identity: self._identity = identity(self._credentials) return self._identity @property def data_processing(self): if not self._data_processing: self._data_processing = data_processing(self._credentials) return self._data_processing
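Each property on the OpenStack wrapper builds its client on first access and caches it by hand. On Python 3.8+ the same laziness can be written with functools.cached_property; a modernized sketch, not the original code, reusing the module-level compute() factory defined above:

from functools import cached_property

class LazyOpenStack(object):
    def __init__(self, credentials):
        self._credentials = credentials

    @cached_property
    def compute(self):
        # Built on first access, then cached on the instance.
        return compute(self._credentials)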
Debian/devscripts
scripts/devscripts/test/test_help.py
Python
gpl-2.0
2,924
0
# test_help.py - Ensure scripts can run --help. # # Copyright (C) 2010, Stefano Rivera <stefanor@ubuntu.com> # # Permission to use, copy, modify, and/or distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH # REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, # INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM # LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR # OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR # PERFORMANCE OF THIS SOFTWARE. import fcntl import os import select import signal import subprocess import time import unittest from . import SCRIPTS TIMEOUT = 5 def load_tests(loader, tests, pattern): # pylint: disable=unused-argument "Give HelpTestCase a chance to populate before loading its test cases" suite = unittest.TestSuite() HelpTestCase.populate() suite.addTests(loader.loadTestsFromTestCase(HelpTestCase)) return suite class HelpTestCase(unittest.TestCase): @classmethod def populate(cls): for script in SCRIPTS: setattr(cls, 'test_' + script, cls.make_help_tester(script)) @classmethod def make_help_tester(cls, script): def tester(self): with open('/dev/null', 'r') as null: process = subprocess.Popen(['./' + script, '--help'], close_fds=True, stdin=null, stdout=subprocess.PIPE, stderr=subprocess.PIPE) started = time.time() out = [] fds = [process.stdout.fileno(), process.stderr.fileno()]
for fd in fds: fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK) while time.time() - started < TIMEOUT: for fd in select.select(fds, [], fds,
TIMEOUT)[0]: out.append(os.read(fd, 1024)) if process.poll() is not None: break if process.poll() is None: os.kill(process.pid, signal.SIGTERM) time.sleep(1) if process.poll() is None: os.kill(process.pid, signal.SIGKILL) self.assertEqual(process.poll(), 0, "%s failed to return usage within %i seconds.\n" "Output:\n%s" % (script, TIMEOUT, ''.encode('ascii').join(out))) return tester
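The fcntl/select machinery above predates subprocess timeouts; on Python 3.3+ the same check, that --help must exit 0 within TIMEOUT seconds, can be expressed directly. A sketch, not the devscripts code:

import subprocess

def check_help(script, timeout=5):
    # Raises subprocess.TimeoutExpired if the script hangs.
    proc = subprocess.run(
        ['./' + script, '--help'],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=timeout)
    assert proc.returncode == 0, proc.stdout + proc.stderr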
lmazuel/azure-sdk-for-python
azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/vault_certificate.py
Python
mit
2,185
0
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class VaultCertificate(Model): """Describes a single certificate reference in a Key Vault, and where the certificate should reside on the VM. :param certificate_url: This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be the Base64 encoding of the following JSON Object, which is encoded in UTF-8: <br><br> {<br> "data":"<Base64-encoded-certificate>",<br> "dataType":"pfx",<br> "password":"<pfx-file-password>"<br>} :type certificate_url: str :param certificate_store: For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified certificate store is implicitly in the LocalMachine account. <br><br>For Linux VMs, the certificate file is placed under the /var/lib/waagent directory, with the file name <UppercaseThumbprint>.crt for the X509 certificate file and <UppercaseThumbprint>.prv for the private key. Both of these files are .pem formatted. :type c
ertificate_store: str """ _attribute_map = { 'certificate_url': {'key': 'certificateUrl', 'type': 'str'}, 'certificate_store': {'key': 'certificateStore', 'type': 'str'},
} def __init__(self, **kwargs): super(VaultCertificate, self).__init__(**kwargs) self.certificate_url = kwargs.get('certificate_url', None) self.certificate_store = kwargs.get('certificate_store', None)
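The msrest Model subclass takes its fields as keyword arguments, and _attribute_map controls how they serialize into the REST payload. Typical construction, with illustrative values:

cert = VaultCertificate(
    certificate_url="https://myvault.vault.azure.net/secrets/mycert",  # illustrative
    certificate_store="My",  # Windows store name; ignored on Linux VMs
)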
fernandolobato/balarco
clients/migrations/0001_initial.py
Python
mit
614
0.001629
# -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2017-02-17 01:37 from __future__ import unicode_literals from django.db import mig
rations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Client', fields=[ ('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('address', models.CharField(max_length=100)), ], ), ]
Azure/azure-sdk-for-python
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2018_03_01/aio/operations/_action_groups_operations.py
Python
mit
24,867
0.005027
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class ActionGroupsOperations: """ActionGroupsOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~$(python-base-namespace).v2018_03_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def create_or_update( self, resource_group_name: str, action_group_name: str, action_group: "_models.ActionGroupResource", **kwargs: Any ) -> "_models.ActionGroupResource": """Create a new action group or update an existing one. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param action_group_name: The name of the action group. :type action_group_name: str :param action_group: The action group to create or use for the update. :type action_group: ~$(python-base-namespace).v2018_03_01.models.ActionGroupR
esource :keyword callable cls: A custom type or function that will be passed the direct response :return: ActionGroupResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2018_03_01.models.ActionGroupResource :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupResource"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-03-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(action_group, 'ActionGroupResource') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('ActionGroupResource', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('ActionGroupResource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'} # type: ignore async def get( self, resource_group_name: str, action_group_name: str, **kwargs: Any ) -> "_models.ActionGroupResource": """Get an action group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param action_group_name: The name of the action group. 
:type action_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: ActionGroupResource, or the result of cls(response) :rtype: ~$(python-base-namespace).v2018_03_01.models.ActionGroupResource :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupResource"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-03-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = s
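A hedged usage sketch for the async operations class above; in practice MonitorManagementClient builds and attaches it for you, so the credential and the resource names below are illustrative (azure-identity is a separate package):

import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.monitor.aio import MonitorManagementClient

async def show_action_group():
    async with DefaultAzureCredential() as cred:
        async with MonitorManagementClient(cred, "<subscription-id>") as client:
            ag = await client.action_groups.get(
                resource_group_name="my-rg", action_group_name="my-ag")
            print(ag.name)

# asyncio.run(show_action_group())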
jorisvanzundert/sfsf
sfsf/epub_to_txt_parser.py
Python
mit
3,042
0.029934
import sys, getopt import errno import os.path import epub import lxml from bs4 import BeautifulSoup class EPubToTxtParser: # Epub parsing specific code def get_linear_items_data( self, in_file_name ): book_items = [] book = epub.open_epub( in_file_name ) for item_id, linear in book.opf.spine.itemrefs: item = book.get_item( item_id ) if linear: data = book.read_item( item ) book_items.append( data ) return book_items def get_narrative( self, linear_items_data ): avg_len = 0 count = 0 for data in linear_items_data: count += 1 avg_len = ( ( avg_len * ( count - 1 ) ) + len( data ) ) / count book_narrative = [ data for data in linear_items_data if len(data) >= avg_len ] return book_narrative def extract_paragraph_text( self, book_narrative ): paragraph_text = '' for data in book_narrative: soup = BeautifulSoup( data, "lxml" ) paragraphs = soup.find_all( 'p' ) # Thanks to Eric Storms for finding the solution # to some 0kB parse results… if paragraphs == []: paragraphs = soup.find_all( 'div' ) for paragraph in paragraphs: paragraph_text += ( paragraph.get_text() + '\n' ) return paragraph_text def narrative_from_epub_to_txt( self, in_file_name ): if os.path.isfile( in_file_name ): book_items = self.get_linear_items_data( in_file_name ) book_narrative = self.get_narrative( book_items ) paragraph_text = self.extract_paragraph_text( book_narrative ) return( paragraph_text ) else: raise FileNotFoundError( errno.ENOENT, os.strerror( errno.ENOENT ), in_file_name ) # Command line usage stuff def print_usage_and_exit(): print( "Usage: %s -i epub_file_in -o txt_file_out" % sys.argv[ 0 ] ) sys.exit( 2 ) def parse_opts( opts ): in_file_name, ou
t_file_name = None, None for o, a in opts: if o == '-i': in_file_name = a elif o == '-o': out_file_name = a return ( in_file_name, out_file_name ) # Main if __name__ == '__main__': try: opts, args = getopt.getopt(sys.argv[1:], "i:o:") assert( len(opts) != 0 ) in_file_name, out_file_name = parse_
opts( opts ) except getopt.GetoptError as e: print( str( e ) ) print_usage_and_exit() except AssertionError: print_usage_and_exit() try: parser = EPubToTxtParser() narrative_text = parser.narrative_from_epub_to_txt( in_file_name ) if( out_file_name != None ): with open( out_file_name, "w" ) as out_file: out_file.write( narrative_text ) out_file.close() else: print( narrative_text ) except FileNotFoundError: print( "File not found: {file_name}".format( file_name = in_file_name ) )
valsson/plumed2
user-doc/tutorials/old_tutorials/munster/SCRIPTS/do_fes.py
Python
lgpl-3.0
2,940
0.019388
import math import sys # read FILE with CVs and weights FILENAME_ = sys.argv[1] # number of CVs for FES NCV_ = int(sys.argv[2]) # read minimum, maximum and number of bins for FES grid gmin = []; gmax = []; nbin = [] for i in range(0, NCV_): i0 = 3*i + 3 gmin.append(float(sys.argv[i0])) gmax.append(float(sys.argv[i0+1])) nbin.append(int(sys.argv[i0+2])) # read KBT_ KBT_ = float(sys.argv[3*NCV_+3]) # read output fes FESFILE_ = sys.argv[3*NCV_+4] def get_indexes_from_index(index, nbin): indexes = [] # get first index indexes.append(index%nbin[0]) # loop kk = index for i in range(1, len(nbin)-1): kk = ( kk - indexes[i-1] ) / nbin[i-1] indexes.append(kk%nbin[i]) if(len(nbin)>=2): indexes.append( ( kk - indexes[len(nbin)-2] ) / nbin[len(nbin) -2] ) return indexes def get_indexes_from_cvs(cvs, gmin, dx): keys = [] for i in range(0, len(cvs)): keys.append(int( round( ( cvs[i] - gmin[i] ) / dx[i] ) )) return tuple(keys) def get_points(key, gmin, dx): xs = [] for i in range(0, len(key)): xs.append(gmin[i] + float(key[i]) * dx[i]) return xs # define bin size dx = [] for i in range(0, NCV_): dx.append( (gmax[i]-gmin[i])/float(nbin[i]-1) ) # create histogram histo = {} # read file and fill in histogram for lines in open(FILENAME_, "r").readlines(): riga = lines.strip().split() # check format if(len(riga)!=NCV_ and len(riga)!=NCV_+1): print FILENAME_,"is in the wrong format!" exit() # read CVs cvs = [] for i in range(0, NCV_): cvs.append(float(riga[i])) # get indexes key = get_indexes_from_cvs(cvs, gmin, dx) if(len(riga)==NCV_+1): # read weight w = float(riga[NCV_]) else: w = 1.0 # update histogram if key in histo: histo[key] += w else: hi
sto[key] = w # calculate free-energy and minimum value min_fes = 1.0e+15 for
key in histo: histo[key] = -KBT_ * math.log(histo[key]) if(histo[key] < min_fes): min_fes = histo[key] # total numbers of bins nbins = 1 for i in range(0, len(nbin)): nbins *= nbin[i] # print out FES log = open(FESFILE_, "w") # this is needed to add a blank line xs_old = [] for i in range(0, nbins): # get the indexes in the multi-dimensional grid key = tuple(get_indexes_from_index(i, nbin)) # get CV values for that grid point xs = get_points(key, gmin, dx) # add a blank line for gnuplot if(i == 0): xs_old = xs[:] else: flag = 0 for j in range(1,len(xs)): if(xs[j] != xs_old[j]): flag = 1 xs_old = xs[:] if (flag == 1): log.write("\n") # print value of CVs for x in xs: log.write("%12.6lf " % x) # print FES if key in histo: fes = histo[key]-min_fes log.write(" %12.6lf\n" % fes) else: log.write(" Infinity\n") log.close()
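How get_indexes_from_index() unflattens a grid index: the first CV varies fastest (column-major order). A worked check with explicit integer division (the script itself is Python 2, where / on ints already floors):

nbin = [3, 4]                   # 3 bins in CV1, 4 bins in CV2
index = 7
i0 = index % nbin[0]            # 7 % 3 = 1
i1 = (index - i0) // nbin[0]    # (7 - 1) // 3 = 2
assert [i0, i1] == [1, 2]       # and 1 + 2*3 == 7 recovers the flat index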
feist/pcs
pcs/rule.py
Python
gpl-2.0
41,255
0.000194
import re import xml.dom.minidom from typing import List, Any, Optional from pcs import utils from pcs.cli.reports.output import warn from pcs.common import ( const, pacemaker, ) from pcs.common.str_tools import format_list_custom_last_separator # pylint: disable=not-callable # main functions def parse_argv(argv, extra_options=None): """ Commandline options: no options """ options = {"id": None, "role": None, "score": None, "score-attribute": None} if extra_options: options.update(dict(extra_options)) # parse options while argv: found = False option = argv.pop(0) for name in options: if option.startswith(name + "="): options[name] = option.split("=", 1)[1] found = True break if not found: argv.insert(0, option) break return options, argv def dom_rule_add(dom_element, options, rule_argv, cib_schema_version): # pylint: disable=too-many-branches """ Commandline options: no options """ # validate options if options.get("score") and options.get("score-attribute"): utils.err("can not specify both score and score-attribute") if options.get("score") and not utils.is_score(options["score"]): # preserving legacy behaviour print( "Warning: invalid score '%s', setting score-attribute=pingd instead" % options["score"] ) warn( "Converting invalid score to score-attribute=pingd is deprecated " "and will be removed.", stderr=True, ) options["score-attribute"] = "pingd" options["score"] = None if options.get("role"): role = options["role"].capitalize() supported_roles = ( const.PCMK_ROLES_PROMOTED + const.PCMK_ROLES_UNPROMOTED ) if role not in supported_roles: utils.err( "invalid role '{role}', use {supported_roles}".format( role=options["role"], supported_roles=format_list_custom_last_separator( list(supported_roles), " or " ), ) ) options["role"] = pacemaker.role.get_value_for_cib( role, cib_schema_version >= const.PCMK_NEW_ROLES_CIB_VERSION, ) if options.get("id"): id_valid, id_error = utils.validate_xml_id(options["id"], "rule id") if not id_valid: utils.err(id_error) if utils.does_id_exist(dom_element.ownerDocument, options["id"]): utils.err( "id '%s' is already in use, please specify another one" % options["id"] ) # parse rule if not rule_argv: utils.err("no rule expression was specified") try: preprocessor = TokenPreprocessor() dom_rule = CibBuilder(cib_schema_version).build( dom_element, RuleParser().parse(preprocessor.run(rule_argv)), options.get("id"), ) except SyntaxError as e: utils.err( "'%s' is not a valid rule expression: %s" % (" ".join(rule_argv), e) ) except UnexpectedEndOfInput as e: utils.err( "'%s' is not a valid rule expression: unexpected end of rule" % " ".join(rule_argv) ) except (ParserException, CibBuilderException) as e: utils.err("'%s' is not a valid rule expression" % " ".join(rule_argv)) for msg in preprocessor.warning_list: warn(msg, stderr=True) # add options into rule xml if not options.get("score") and not options.get("score-attribute"): options["score"] = "INFINITY" for name, value in options.items(): if name != "id" and value is not None: dom_rule.setAttribute(name, value) # score or score-attribute is required for the nested rules in order to have # valid CIB, pacemaker does not use the score of the nested rules for rule in dom_rule.getElementsByTagName("rule"): rule.setAttribute("score", "0") if dom_element.hasAttribute("score"): dom_element.removeAttribute("score") if dom_element.hasAttribute("node"): dom_element.removeAttribute("node") return dom_element class ExportDetailed: def __init__(self): self.show_detail = False self.rule_expired = False def get_string(self, 
rule, rule_expired, show_detail, indent=""): self.show_detail = show_detail self.rule_expired = rule_expired return indent + ("\n" + indent).join(self.list_rule(rule)) def list_rule(self, rule): rule_parts = [ "Rule{0}: {1}".format( " (expired)" if self.rule_expired else "", " ".join(self._list_attributes(rule)), ) ] for child in rule.childNodes: if child.nodeType == xml.dom.minidom.Node.TEXT_NODE: continue if child.tagName == "expression": self.indent_append(rule_parts, self.list_expression(child)) elif child.tagName == "date_expression": self.indent_append(rule_parts, self.list_date_expression(child)) elif child.tagName == "rule": self.indent_append(rule_parts, self.list_rule(child)) return rule_parts def list_expression(self, expression): if "value" in expression.attributes.keys(): exp_parts = [ expression.getAttribute("attribute"), expression.getAttribute("operation"), ] if expression.hasAttribute("type"): exp_parts.append(expression.getAttribute("type")) exp_parts.append(expression.getAttribute("value")) else: exp_parts = [ expression.getAttribute("operation"), expression.getAttribute("attribute"), ] if self.show_detail: exp_parts.append("(id:%s)" % expression.getAttribute("id")) return ["Expression: %s" % " ".join(exp_parts)] def list_date_expression(self, expression): operation = expression.getAttribute("operation") if operation == "date_spec": date_spec_parts = self._list_attributes( expression.getElementsByTagName("date_spec")[0] ) exp_parts = ["Expression:"] if self.show_detail: exp_parts.append("(id:%s)" % expression.getAttribute("id")) return self.indent_append( [" ".join(exp_parts)], ["Date Spec: %s" % " ".join(date_spec_parts)], ) if operation == "in_range": exp_parts = ["date", "in_range"] if expression.hasAttribute("start"): exp_parts.extend([expression.getAttribute("start"), "to"]) if expression.hasAttribute("end"): exp_parts.append(expression.getAttribute("end")) durations = expression.getElementsByTagName("duration") if durations: exp_parts.append("duration") duration_parts = self._list_attributes(durations[0]) if self.show_detail: exp_parts.append("(id:%s)" % expression.getAttribute("id")) result = ["Expression: %s" % " ".join(exp_parts)] if durations: self.indent_append( result, ["Duration: %s" % " ".join(duration_parts)] ) return result exp_parts = ["date", expression.getAttribute("operation")] if expression.hasAttribute("start"): exp_parts.append(expression.getAttribute("start")) if expression.hasAttribute("end"): exp_parts.append(expression.getAttribute("end")) if self.show_detail
: exp_parts.append("(id:%s)" % expression.getAttribute("id"
)) return ["Expression: " + " ".join(exp_parts)] def _list_attributes(self, element): attributes = utils.dom_attrs_to_list(element, with_id=False)
white111/CMtestpy
bin/cmtest.py
Python
lgpl-2.1
7,516
0.023018
#!/usr/bin/python ################################################################################ # # Module: cmtest.py # # Author: Paul Tindle ( mailto:Paul@Tindle.org ) # # Descr: Main Test executive # # Version: 0.1 $Id$ # # Changes: 05/18/17 Conversion from perl - JSW # # Still ToDo: # # License: This software is subject to and may be distributed under the # terms of the GNU General Public License as described in the # file License.html found in this or a parent directory. # Forward any and all validated updates to Paul@Tindle.org # # Copyright (c) 1995 - 2005 Paul Tindle. All rights reserved. # Copyright (c) 2005-2008 Stoke. All rights reserved. # Copyright (c) 2017 Joe White. All rights reserved. # ################################################################################ VER= 'v0.1 5/9/2017'; # Conversion to Python from Perl 050917 JSW CVS_VER = ' [ CVS: $Id: Logs.pm,v 1.10 2011/01/21 18:38:56 joe Exp $ ]'; global CMtestVersion; if not "CMtestVersion" in globals() : CMtestVersion={} CMtestVersion['cmtest'] = VER + CVS_VER import Globals #from Globals import * import Util import Init import FileOp import Logs import Connect import Menu import sys sys.path.append("../lib;") import os import os.path from os.path import expanduser import socket from optparse import OptionParser #import lib # import private library functions for CMtestpy, see lib/__init__.py #import Globals #print(globals()) #from lib.Util import Abort #from lib.Globals import Myglobals #import Mav #import Power #import lib.GUI import sys, traceback # catch keyboard interupt import platform from os.path import isfile, join #__________________________________________________________________________ def main(): #Globals.Myglobals() #import Globals print (Globals.Debug) #print("My Debug = %i" % Debug) print(globals()) #Debug flag #global Debug #Debug = 1 #global Verbose #global Menu1 #global session #global Cfg_File #global Tmp #global CmdFilePath #global Version #global Session #global SessionForce #global CMPipe; CMPipe=os.getenv('CmTest_Release_Pipe', "No_Pipe") #global UserID #global Out_File #global Loop_overide #global shucks; shucks = 0 #global GlobalVar #print (global()) #Get input from command line usage = "usage: %prog session#" parser = OptionParser(usage) parser.add_option("-d", "--debug", action="count", dest="Debug", default=0, help="Turn on Debug Stetments") parser.add_option("-v", "--verbose", action="count", dest="Verbose", default=0, help="Turn on more output") parser.add_option("-B", "--Batch", dest="Menu1", default="", help="Batch Mode - no Menu prompt, does not support multi level menu" ) parser.add_option("-s", "--session", dest="Session", type="int", default=0, help="Set Sesion #, Default is first avaiable") parser.add_option("-L", "--Loop", dest="Loop", type="int", default=0, help="Overide all Loop counts(se
conds)") parser.add_option("-F", "--Force", dest="Force", type="int", default=0, help="Force Session #") parser.add_option("-r", "--regress", dest="Regress", default="null", help="Directly execute a subroutine") parser.add_option("-U", "--User", dest="User", default="None", help="Set User ID") parser.add_option("-O", "--Output", dest="Output", default=r"cmtest.xml", help="Set Output XML fi
le, will default to tmp/cmtest.xml") (options, args) = parser.parse_args() #if not options.Session : #parser.error("-s session# required") Globals.Debug += options.Debug Globals.Verbose += options.Verbose Globals.Menu1 = options.Menu1 Globals.Regress = options.Regress Globals.Session = options.Session Globals.SessionForce = options.Force Globals.Force = options.Force Globals.CurrentUserID = options.User Globals.Out_File = options.Output Globals.Loop_overide = options.Loop OS = os.name if os.name == "nt": Globals.OS = "NT" else: Globals.OS = "Linux" if Globals.Debug : print ("Detected OS: %s " % Globals.OS) #Get our base directory and find the Station Config File File = os.path.abspath(__file__) Globals.GP_Path = FileOp.fnstrip(File,1) PPATH = FileOp.fnstrip(File,1) if Globals.Debug : print ("OS path detected is: %s " % Globals.GP_Path) if Globals.GP_Path == '': Globals.GP_Path = ".." if Globals.OS == "NT": Globals.Cfg_File = join(Globals.GP_Path,"cfgfiles","testctrl.defaults.cfg") #Globals[LogPath] = "\Logs" TmpDir = expanduser("~") else: Globals.Cfg_File = '/usr/local/cmtest/testctrl.cfg' #Globals[LogPath] = r"/var/local/cmtest/logs" TmpDir = expanduser("~") + "/tmp" if Globals.Debug : print ("Config path detected is: %s " % Globals.Cfg_File) #if OS == 'nt': #Cfg_File = PPath + "/" + "cfgfiles/testctrl.defaults.cfg" #Tmp = os.getenv('TMP', "NO_TMP") #else : #Cfg_File = r'/usr/local/cmtest/testctrl.cfg' #Tmp = os.getenv(expanduser("~") + "/tmp", "NO_TMP") Globals.CmdFilePath = r"../" + PPATH +r"/cmdfiles" Util.ASCIIColor('reset') _Init() GUI = 0 # uneeded Perl &GUI_Init if $GUI; Quiet = 0; # Don't allow since we only have a char menu right now shucks = 0 try: Menu.Menu_main() # Bring up menu and start excution except KeyboardInterrupt: print( "Shutdown requested...exiting") _catch_zap() except Exception: traceback.print_exc(file=sys.stdout) sys.exit(0) if not Quiet : print("done\n") Util.Exit(0) #_____________________________________________________________________________ def _Init(): "Initialize Cmtest" if Globals.Debug : print("In this Function %s" % __name__) global Linux_gbl global Erc global Force if not os.name == "nt" : Linux_gbl = 'Ubuntu'; # Added 3/4/10 to support Ubuntu install try: with open ("/etc/*release", r) as fh : for line in fh: if re.search(r"Ubuntu", line) : Linux_gbl = 'Ubuntu' elif re.search(r"Fedora", line) : Linux_gbl = 'Fedora' elif re.search(r"CentOS", line) : Linux_gbl = 'CentOS' else : Linux_gbl = 'unknown'; print ("Un-suported linux type found, I am going to die now") exit() except: print ("Un-suported linux type found, are we Windows? I am going to die now") if not Debug : exit() #else we are NT print ("Debug in _Init %i" % Globals.Debug) Init.Init_All (0) Globals.Erc = 101 Globals.Erc = 0 Init.Init_Also(0) return #____________________________________________________________________________________ def _catch_zap(): global shucks; shucks +=1 Power ('OFF'); Globals.Stats['Status'] = 'Aborted'; Exit(998,"<Ctrl>-C Aborted"); #____________________________________________________________________________________ if __name__ == "__main__": main()
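Note that _Init above tries open("/etc/*release", r), which cannot work: open() does not expand wildcards, and the bare name r is undefined (the mode string "r" was intended); the module also calls re.search without importing re. A minimal sketch of the apparent intent, assuming glob-based discovery of the release files:

import glob
import re

def detect_linux_flavor(default="unknown"):
    # open() does not glob, so expand /etc/*release ourselves.
    for path in glob.glob("/etc/*release"):
        try:
            with open(path, "r") as fh:
                content = fh.read()
        except OSError:
            continue
        for flavor in ("Ubuntu", "Fedora", "CentOS"):
            if re.search(flavor, content):
                return flavor
    return default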
OpenGov/python_data_wrap
tests/table_wrap_test.py
Python
lgpl-2.1
4,724
0.005292
# This import fixes sys.path issues from . import parentpath from datawrap import tablewrap import unittest class TableWrapTest(unittest.TestCase): ''' Tests the capability to wrap 2D objects in Tables and transpose them. ''' def setUp(self): # self.table doesn't need the tablewrap.Table object to work # but this tests both wrappers at once self.table = tablewrap.Table([[1,2,3,4,5], [6,7,8,9,10], ['a','b','c','d','e']]) self.transpose = tablewrap.TableTranspose(self.table) def test_table_transpose(self): self.assertEqual(self.transpose[0][0], self.table[0][0]) self.assertEqual(self.transpose[4][0], self.table[0][4]) self.assertEqual(self.transpose[0][2], self.table[2][0]) self.assertEqual(self.transpose[4][2], self.table[2][4]) self.assertEqual(self.transpose[-1][-1], self.table[-1][-1]) self.assertEqual(self.transpose[-2][-3], self.table[-3][-2]) for c,col in enumerate(self.transpose): for r,elem in enumerate(col): self.assertEqual(elem, self.table[r][c]) def test_table_slice(self): # Try copy slice requests self.assertEqual(self.transpose[:][0][0], self.table[0][0]) self.assertEqual(self.transpose[:][4][0], self.table[0][4]) self.assertEqual(self.transpose[:][0][2], self.table[2][0]) self.assertEqual(self.transpose[:][4][2], self.table[2][4]) # Make a change self.transpose[:][0][2] = 'new' self.assertEqual(self.transpose[0][2], 'new') self.assertEqual(self.table[2][0], 'new') # Try some different slices tslice = slice(1, None, 2) translice = self.transpose[tslice] self.assertEqual(len(translice), 2) self.assertEqual(len(translice[0]), 3) translice[0][0] = 'new2' self.assertEqual(translice[0][0], self.table[0][tslice][0])
        self.assertEqual(translice[1][0], self.table[0][tslice][1])
        self.assertEqual(translice[0][2], self.table[2][tslice][0])
        self.assertEqual(translice[1][2], self.table[2][tslice][1])

        tslice = slice(None, 1, None)
        translice = self.transpose[tslice]
        self.assertEqual(len(translice), 1)
        self.assertEqual(len(translice[0]), 3)
        translice[0][0] = 'new3'
        self.assertEqual(translice[0][0], self.table[0][tslice][0])
        self.assertEqual(translice[0][2], self.table[2][tslice][0])

    def test_verify(self):
        # Check that valid finds bad tables
        bad_table = [[1, 2, 3], ['a', 'b'], [4, 5, 6]]
        self.assertRaises(ValueError, lambda: tablewrap.Table(bad_table, True, False))
        self.assertRaises(ValueError, lambda: tablewrap.TableTranspose(bad_table, True, False))
        bad_table = [[1], ['a', 'b'], [4, 5, 6]]
        self.assertRaises(ValueError, lambda: tablewrap.Table(bad_table, True, False))
        self.assertRaises(ValueError, lambda: tablewrap.TableTranspose(bad_table, True, False))
        bad_table = [[1, 2], ['a'], [4]]
        self.assertRaises(ValueError, lambda: tablewrap.Table(bad_table, True, False))
        self.assertRaises(ValueError, lambda: tablewrap.TableTranspose(bad_table, True, False))
        bad_table = [[1, 2], ['a'], [4]]
        noCheck = tablewrap.TableTranspose(bad_table, False, False)
        # If we don't do validity checks and instead access a bad index...
        self.assertRaises(IndexError, lambda: noCheck[2][1])

    def test_repair(self):
        # Check that valid finds bad tables
        bad_table = [[1, 2, 3], ['a', 'b'], [4, 5, 6]]
        self.assertRaises(ValueError, lambda: tablewrap.Table(bad_table, True, False))
        # Neither of these should explode
        table = tablewrap.Table(bad_table, False, True)
        self.assertIsNone(table[1][2])
        self.assertIsNone(bad_table[1][2])
        bad_table = [[1, 2, 3], ['a', 'b'], [4, 5, 6]]
        table = tablewrap.Table(bad_table, True, True)
        self.assertIsNone(table[1][2])
        self.assertIsNone(bad_table[1][2])

        # Check that valid finds bad tables
        bad_table = [[1, 2, 3], ['a', 'b'], [4, 5, 6]]
        self.assertRaises(ValueError, lambda: tablewrap.TableTranspose(bad_table, True, False))
        # Neither of these should explode
        transpose = tablewrap.TableTranspose(bad_table, False, True)
        self.assertIsNone(transpose[2][1])
        self.assertIsNone(bad_table[1][2])
        bad_table = [[1, 2, 3], ['a', 'b'], [4, 5, 6]]
        transpose = tablewrap.TableTranspose(bad_table, True, True)
        self.assertIsNone(transpose[2][1])
        self.assertIsNone(bad_table[1][2])


if __name__ == '__main__':
    unittest.main()
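The tests above pin down the core invariant of TableTranspose: reads and writes on transpose[col][row] must land on table[row][col]. A self-contained sketch of such a view (no slicing, validity checks, or repair, unlike tablewrap's; illustrative only):

class SimpleTranspose:
    # Lazy column-major view over a list-of-lists; writes pass through.
    def __init__(self, table):
        self._table = table
    def __len__(self):
        return len(self._table[0]) if self._table else 0
    def __getitem__(self, col):
        return _Column(self._table, col)

class _Column:
    def __init__(self, table, col):
        self._table, self._col = table, col
    def __len__(self):
        return len(self._table)
    def __getitem__(self, row):
        return self._table[row][self._col]
    def __setitem__(self, row, value):
        self._table[row][self._col] = value

table = [[1, 2, 3], [4, 5, 6]]
t = SimpleTranspose(table)
assert t[2][1] == table[1][2] == 6
t[0][0] = 'x'
assert table[0][0] == 'x'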
klahnakoski/TestLog-ETL
vendor/mo_collections/persistent_queue.py
Python
mpl-2.0
7,580
0.002111
# encoding: utf-8 # # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. # # Contact: Kyle Lahnakoski (kyle@lahnakoski.com) # from __future__ import absolute_import, division, unicode_literals from mo_dots import Data, wrap from mo_files import File import mo_json from mo_logs import Log from mo_logs.exceptions import suppress_exception from mo_math.randoms import Random from mo_threads import Lock, Signal, THREAD_STOP DEBUG = True class PersistentQueue(object): """ THREAD-SAFE, PERSISTENT QUEUE CAN HANDLE MANY PRODUCERS, BUT THE pop(), commit() IDIOM CAN HANDLE ONLY ONE CONSUMER. IT IS IMPORTANT YOU commit() or close(), OTHERWISE NOTHING COMES OFF THE QUEUE """ def __init__(self, _file): """ file - USES FILE FOR PERSISTENCE """ self.file = File.new_instance(_file) self.lock = Lock("lock for persistent queue using file " + self.file.name) self.please_stop = Signal() self.db = Data() self.pending = [] if self.file.exists: for line in self.file: with suppress_exception: delta = mo_json.json2value(line) apply_delta(self.db, delta) if self.db.status.start == None: # HAPPENS WHEN ONLY ADDED TO QUEUE, THEN CRASH self.db.status.start = 0 self.start = self.db.status.start # SCRUB LOST VALUES lost = 0 for k in self.db.keys(): with suppress_exception: if k!="status" and int(k) < self.start: self.db[k] = None lost += 1 # HAPPENS FOR self.db.status, BUT MAYBE OTHER PROPERTIES TOO if lost: Log.warning("queue file had {{num}} items lost", num= lost) DEBUG and Log.note("Persistent queue {{name}} found with {{num}} items", name=self.file.abspath, num=len(self)) else: self.db.status = Data( start=0, end=0 ) self.start = self.db.status.start DEBUG and Log.note("New persistent queue {{name}}", name=self.file.abspath) def _add_pending(self, delta): delta = wrap(delta) self.pending.append(delta) def _apply_pending(self): for delta in self.pending: apply_delta(self.db, delta) self.pending = [] def __iter__(self): """ BLOCKING ITERATOR """ while not self.please_stop: try: value = self.pop() if value is not THREAD_STOP: yield value except Exception as e: Log.warning("Tell me about what happened here", cause=e) def add(self, value): with self.lock: if self.closed: Log.error("Queue is closed") if value is THREAD_STOP: DEBUG and Log.note("Stop is seen in persistent queue") self.please_stop.go() return self._add_pending({"add": {str(self.db.status.end): value}}) self.db.status.end += 1 self._add_pending({"add": {"status.end": self.db.status.end}}) self._commit() return self def __len__(self): with self.lock: return self.db.status.end - self.start
    def __getitem__(self, item):
        return self.db[str(item + self.start)]

    def pop(self, timeout=None):
        """
        :param timeout: OPTIONAL DURATION
        :return: None, IF timeout PASSES
        """
        with self.lock:
            while not self.please_stop:
                if self.db.status.end > self.start:
value = self.db[str(self.start)] self.start += 1 return value if timeout is not None: with suppress_exception: self.lock.wait(timeout=timeout) if self.db.status.end <= self.start: return None else: self.lock.wait() DEBUG and Log.note("persistent queue already stopped") return THREAD_STOP def pop_all(self): """ NON-BLOCKING POP ALL IN QUEUE, IF ANY """ with self.lock: if self.please_stop: return [THREAD_STOP] if self.db.status.end == self.start: return [] output = [] for i in range(self.start, self.db.status.end): output.append(self.db[str(i)]) self.start = self.db.status.end return output def rollback(self): with self.lock: if self.closed: return self.start = self.db.status.start self.pending = [] def commit(self): with self.lock: if self.closed: Log.error("Queue is closed, commit not allowed") try: self._add_pending({"add": {"status.start": self.start}}) for i in range(self.db.status.start, self.start): self._add_pending({"remove": str(i)}) if self.db.status.end - self.start < 10 or Random.range(0, 1000) == 0: # FORCE RE-WRITE TO LIMIT FILE SIZE # SIMPLY RE-WRITE FILE if DEBUG: Log.note("Re-write {{num_keys}} keys to persistent queue", num_keys=self.db.status.end - self.start) for k in self.db.keys(): if k == "status" or int(k) >= self.db.status.start: continue Log.error("Not expecting {{key}}", key=k) self._commit() self.file.write(mo_json.value2json({"add": self.db}) + "\n") else: self._commit() except Exception as e: raise e def _commit(self): self.file.append("\n".join(mo_json.value2json(p) for p in self.pending)) self._apply_pending() def close(self): self.please_stop.go() with self.lock: if self.db is None: return self.add(THREAD_STOP) if self.db.status.end == self.start: DEBUG and Log.note("persistent queue clear and closed") self.file.delete() else: DEBUG and Log.note("persistent queue closed with {{num}} items left", num=len(self)) try: self._add_pending({"add": {"status.start": self.start}}) for i in range(self.db.status.start, self.start): self._add_pending({"remove": str(i)}) self.file.write(mo_json.value2json({"add": self.db}) + "\n" + ("\n".join(mo_json.value2json(p) for p in self.pending)) + "\n") self._apply_pending() except Exception as e: raise e self.db = None @property def closed(self): with self.lock: return self.db is None def apply_delta(value, delta): if delta.add: for k, v in delta.add.items(): value[k] = v elif delta.remove: value[delta.remove] = None
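Given the pop/commit contract documented on the class above, typical use looks like this sketch (assumes the mo-* dependencies are importable; the file name is invented):

q = PersistentQueue("backlog.json")   # replays any journal left on disk
q.add({"task": "etl-1"})              # appended to memory and the journal
item = q.pop()                        # handed out, but still journaled
q.commit()                            # now the pop is durable
q.close()                            # rollback() instead would re-queue it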
troismph/matasano-challenges
src/challenge48.py
Python
gpl-3.0
78
0
from challenge47 import bleichenbacher


def crack():
    bleichenbacher(768)
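bleichenbacher(768) presumably re-runs the Bleichenbacher '98 padding-oracle attack from challenge 47 against a 768-bit modulus. The oracle such an attack needs only reveals PKCS#1 v1.5 conformance of a decrypted block; a sketch of just that check (byte layout only, not the attack itself):

def looks_pkcs1_conforming(block: bytes, k: int) -> bool:
    # k is the modulus size in bytes; a conforming block starts 0x00 0x02.
    return len(block) == k and block[0] == 0x00 and block[1] == 0x02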
vpramo/contrail-neutron-plugin
neutron_plugin_contrail/plugins/opencontrail/vnc_client/router_res_handler.py
Python
apache-2.0
24,484
0
# Copyright 2015. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cfgm_common import exceptions as vnc_exc import contrail_res_handler as res_handler import netaddr from neutron.common import constants as n_constants from neutron.common import exceptions as n_exceptions import subnet_res_handler as subnet_handler import vmi_res_handler as vmi_handler from vnc_api import vnc_api SNAT_SERVICE_TEMPLATE_FQ_NAME = ['default-domain', 'netns-snat-template'] class LogicalRouterMixin(object): @staticmethod def _get_external_gateway_info(rtr_obj): vn_refs = rtr_obj.get_virtual_network_refs() if vn_refs: return vn_refs[0]['uuid'] def _neutron_dict_to_rtr_obj(self, router_q, rtr_obj): rtr_name = router_q.get('name') id_perms = rtr_obj.get_id_perms() if 'admin_state_up' in router_q: id_perms.enable = router_q['admin_state_up'] rtr_obj.set_id_perms(id_perms) if rtr_name: rtr_obj.display_name = rtr_name return rtr_obj def _rtr_obj_to_neutron_dict(self, rtr_obj, contrail_extensions_enabled=True, fields=None): rtr_q_dict = {} rtr_q_dict['id'] = rtr_obj.uuid if not rtr_obj.display_name: rtr_q_dict['name'] = rtr_obj.get_fq_name()[-1] else: rtr_q_dict['name'] = rtr_obj.display_name rtr_q_dict['tenant_id'] = self._project_id_vnc_to_neutron( rtr_obj.parent_uuid) rtr_q_dict['admin_state_up'] = rtr_obj.get_id_perms().enable rtr_q_dict['shared'] = False rtr_q_dict['status'] = n_constants.NET_STATUS_ACTIVE rtr_q_dict['gw_port_id'] = None ext_net_uuid = self._get_external_gateway_info(rtr_obj) if not ext_net_uuid: rtr_q_dict['external_gateway_info'] = None else: rtr_q_dict['external_gateway_info'] = {'network_id': ext_net_uuid, 'enable_snat': True} if contrail_extensions_enabled: rtr_q_dict.update({'contrail:fq_name': rtr_obj.get_fq_name()}) if fields: rtr_q_dict = self._filter_res_dict(rtr_q_dict, fields) return rtr_q_dict def _router_add_gateway(self, router_q, rtr_obj): ext_gateway = router_q.get('external_gateway_info') old_ext_gateway = self._get_external_gateway_info(rtr_obj) if ext_gateway or old_ext_gateway: network_id = None if ext_gateway: network_id = ext_gateway.get('network_id') if network_id: if old_ext_gateway and network_id == old_ext_gateway: return try: vn_obj = self._vnc_lib.virtual_network_read(id=network_id) if not vn_obj.get_router_external(): self._raise_contrail_exception( 'BadRequest', resource='router', msg="Network %s is not a valid " "external network" % network_id) except vnc_exc.NoIdError: self._raise_contrail_exception('NetworkNotFound', net_id=network_id) self._router_set_external_gateway(rtr_obj, vn_obj) else: self._router_clear_external_gateway(rtr_obj) def _router_set_external_gateway(self, router_obj, ext_net_obj): router_obj.set_virtual_network(ext_net_obj) self._vnc_lib.logical_router_update(router_obj) def _router_clear_external_gateway(self, router_obj): router_obj.set_virtual_network_list([]) self._vnc_lib.logical_router_update(router_obj) def _set_snat_routing_table(self, router_obj, network_id): project_obj = self._project_read(proj_id=router_obj.parent_uuid) 
rt_name = 'rt_' + router_obj.uuid rt_fq_name = project_obj.get_fq_name() + [rt_name] try: rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name) except vnc_exc.NoIdError: # No route table set with that router ID, the gateway is not set return try: vn_obj = self._vnc_lib.virtual_network_read(id=network_id) except vnc_exc.NoIdError: self._raise_contrail_exception( 'NetworkNotFound', net_id=network_id) vn_obj.set_route_table(rt_obj) self._vnc_lib.virtual_network_update(vn_obj) def _clear_snat_routing_table(self, router_obj, network_id): project_obj = self._project_read(proj_id=router_obj.parent_uuid) rt_name = 'rt_' + router_obj.uuid rt_fq_name = project_obj.get_fq_name() + [rt_name] try: rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name) except vnc_exc.NoIdError: # No route table set with that router ID, the gateway is not set return try: vn_obj = self._vnc_lib.virtual_network_read(id=network_id) except vnc_exc.NoIdError: self._raise_contrail_exception( 'NetworkNotFound', net_id=network_id) vn_obj.del_route_table(rt_obj) self._vnc_lib.virtual_network_update(vn_obj) class LogicalRouterCreateHandler(res_handler.ResourceCreateHandler, LogicalRouterMixin): resource_create_method = 'logical_router_create' def _create_router(self, router_q): project_id = self._project_id_neutron_to_vnc(router_q['tenant_id']) project_obj = self._project_read(proj_id=project_id) id_perms = vnc_api.IdPermsType(enable=True) return vnc_api.LogicalRouter(router_q.get('name'), project_obj, id_perms=id_perms) def resource_create(self, context, router_q): rtr_obj = self._neutron_dict_to_rtr_obj( router_q, self._create_rout
er(router_q))
        rtr_uuid = self._resource_create(rtr_obj)
        contrail_extensions_enabled = self._kwargs.get(
            'contrail_extensions_enabled', False)

        # read it back to update id perms
        rtr_obj = self._resource_get(id=rtr_uuid)
        self._router_add_gateway(router_q, rtr_obj)
        return self._rtr_obj_to_neutron_dict(
            rtr_obj,
            contrail_extensions_enabled=contrail_extensions_enabled)


class LogicalRouterDeleteHandler(res_handler.ResourceDeleteHandler,
                                 LogicalRouterMixin):
    resource_delete_method = 'logical_router_delete'

    def resource_delete(self, context, rtr_id):
        try:
            rtr_obj = self._resource_get(id=rtr_id)
            if rtr_obj.get_virtual_machine_interface_refs():
                self._raise_contrail_exception('RouterInUse',
                                               router_id=rtr_id)
        except vnc_exc.NoIdError:
            self._raise_contrail_exception('RouterNotFound',
                                           router_id=rtr_id)

        self._router_clear_external_gateway(rtr_obj)
        try:
            self._resource_delete(id=rtr_id)
        except vnc_exc.RefsExistError:
            self._raise_contrail_exception('RouterInUse', router_id=rtr_id)


class LogicalRouterUpdateHandler(res_handler.ResourceUpdateHandler,
                                 LogicalRouterMixin):
    resource_update_method = 'logical_router_update'

    def _get_rtr_obj(self, router_q):
        return self._resource_get(id=router_q.get('id'))

    def resource_update(self,
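Every handler above funnels its response through an optional fields filter (self._filter_res_dict). A standalone sketch of that pattern, with invented data:

def filter_res_dict(res_dict, fields=None):
    # Return only the requested keys, or everything when fields is falsy.
    if not fields:
        return res_dict
    return {k: v for k, v in res_dict.items() if k in fields}

router = {'id': 'r-1', 'name': 'gw', 'admin_state_up': True}
assert filter_res_dict(router, ['id', 'name']) == {'id': 'r-1', 'name': 'gw'}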
Yenthe666/Odoo_Samples
sale/sale.py
Python
agpl-3.0
69,258
0.005761
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from datetime import datetime, timedelta
import time

from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
from openerp import workflow


class res_company(osv.Model):
    _inherit = "res.company"
    _columns = {
        'sale_note': fields.text('Default Terms and Conditions', translate=True,
                                 help="Default terms and conditions for quotations."),
    }


class sale_order(osv.osv):
    _name = "sale.order"
    _inherit = ['mail.thread', 'ir.needaction_mixin']
    _description = "Sales Order"
    _track = {
        'state': {
            'sale.mt_order_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state in ['manual'],
'sale.mt_order_sent': lambda self, cr, uid, obj, ctx=None: obj.state in ['sent'] }, } def _amount_line_tax(self, cr, uid, line, context=None): val = 0.0 for c in self.pool.get('account.tax').compute_all(cr, uid, line.tax_id, line.price_unit * (1-(line.discount or 0.0)/100.0), line.product_uom_qty, line.product_id, line.order_id.partner_id)['taxes']: val += c.get('amount', 0.0) return val def _amount_all_wrapper(self, cr, uid, ids, field_name, arg, context=None): """ Wrapper because of direct method passing as parameter for function fields """ return self._amount_all(cr, uid, ids, field_name, arg, context=context) def _amount_all(self, cr, uid, ids, field_name, arg, context=None): cur_obj = self.pool.get('res.currency') res = {} for order in self.browse(cr, uid, ids, context=context): res[order.id] = { 'amount_untaxed': 0.0, 'amount_tax': 0.0, 'amount_total': 0.0, } val = val1 = 0.0 cur = order.pricelist_id.currency_id for line in order.order_line: val1 += line.price_subtotal val += self._amount_line_tax(cr, uid, line, context=context) res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val) res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1) res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax'] return res def _invoiced_rate(self, cursor, user, ids, name, arg, context=None): res = {} for sale in self.browse(cursor, user, ids, context=context): if sale.invoiced: res[sale.id] = 100.0 continue tot = 0.0 for invoice in sale.invoice_ids: if invoice.state not in ('draft', 'cancel'): tot += invoice.amount_untaxed if tot: res[sale.id] = min(100.0, tot * 100.0 / (sale.amount_untaxed or 1.00)) else: res[sale.id] = 0.0 return res def _invoice_exists(self, cursor, user, ids, name, arg, context=None): res = {} for sale in self.browse(cursor, user, ids, context=context): res[sale.id] = False if sale.invoice_ids: res[sale.id] = True return res def _invoiced(self, cursor, user, ids, name, arg, context=None): res = {} for sale in self.browse(cursor, user, ids, context=context): res[sale.id] = True invoice_existence = False for invoice in sale.invoice_ids: if invoice.state!='cancel': invoice_existence = True if invoice.state != 'paid': res[sale.id] = False break if not invoice_existence or sale.state == 'manual': res[sale.id] = False return res def _invoiced_search(self, cursor, user, obj, name, args, context=None): if not len(args): return [] clause = '' sale_clause = '' no_invoiced = False for arg in args: if (arg[1] == '=' and arg[2]) or (arg[1] == '!=' and not arg[2]): clause += 'AND inv.state = \'paid\'' else: clause += 'AND inv.state != \'cancel\' AND sale.state != \'cancel\' AND inv.state <> \'paid\' AND rel.order_id = sale.id ' sale_clause = ', sale_order AS sale ' no_invoiced = True cursor.execute('SELECT rel.order_id ' \ 'FROM sale_order_invoice_rel AS rel, account_invoice AS inv '+ sale_clause + \ 'WHERE rel.invoice_id = inv.id ' + clause) res = cursor.fetchall() if no_invoiced: cursor.execute('SELECT sale.id ' \ 'FROM sale_order AS sale ' \ 'WHERE sale.id NOT IN ' \ '(SELECT rel.order_id ' \ 'FROM sale_order_invoice_rel AS rel) and sale.state != \'cancel\'') res.extend(cursor.fetchall()) if not res: return [('id', '=', 0)] return [('id', 'in', [x[0] for x in res])] def _get_order(self, cr, uid, ids, context=None): result = {} for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context): result[line.order_id.id] = True return result.keys() def _get_default_company(self, cr, uid, context=None): company_id = 
self.pool.get('res.users')._get_company(cr, uid, context=context) if not company_id: raise osv.except_osv(_('Error!'), _('There is no default company for the current user!')) return company_id def _get_default_section_id(self, cr, uid, context=None): """ Gives default section by checking if present in the context """ section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False if not section_id: section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False return section_id def _resolve_section_id_from_context(self, cr, uid, context=None): """ Returns ID of section based on the value of 'section_id' context key, or None if it cannot be resolved to a single Sales Team. """ if context is None: context = {} if type(context.get('default_section_id')) in (int, long): return context.get('default_section_id') if isinstance(context.get('default_section_id'), basestring): section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context) if len(section_ids) == 1: return int(section_ids[0][0]) return None #This function automatically sets the currency to EUR. def _get_default_currency(self, cr, uid, context=None): res = self.pool.get('res.company').search(cr, uid, [('currency_id','=','EUR')], context=context) return res and res[0] or False _columns = { #This fills the Many2one with all data from res.currency 'currency_id_inv
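_amount_all above rounds the untaxed sum and the tax sum separately in the order's currency and adds them, while _amount_line_tax applies the per-line discount before computing taxes. In isolation the arithmetic is as follows (illustrative flat rate and 2-decimal rounding; Odoo delegates the real tax rules to account.tax's compute_all):

def amount_all(lines, tax_rate=0.21):
    # lines: (qty, unit_price, discount_pct) triples, echoing
    # price_unit * (1 - discount / 100) from _amount_line_tax
    untaxed = round(sum(q * p * (1 - d / 100.0) for q, p, d in lines), 2)
    tax = round(untaxed * tax_rate, 2)
    return {'amount_untaxed': untaxed, 'amount_tax': tax,
            'amount_total': round(untaxed + tax, 2)}

print(amount_all([(2, 10.0, 0.0), (1, 5.5, 10.0)]))
# {'amount_untaxed': 24.95, 'amount_tax': 5.24, 'amount_total': 30.19}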
arruda/cloudfuzzy
manage.py
Python
mit
253
0
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cloudfuzzy.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
kdungs/python-mcheck
mcheck/__init__.py
Python
mit
1,840
0
""" Define a Check monad and corresponding functions. """ from functools import (reduce, partial) class Check: """ This super class is not really necessary but helps make the structure clear. data Check a = Pass a | Fai
l Message """ pass class Pass(Check): def __init__(self, value): self.value = value class Fail(Check): def __init__(self, message): self.message = message def is_(t, x): """ Check whether the type of a given x is a given type t. """ return type(x) is t is_check = partial(is_, Check) is_pass = partial(is_, Pass) is_fail = partial(is_, Fail) def return_(x): """ Monadic return for the Check monad. return ::
a -> m a return = Pass """ return Pass(x) def bind(f): """ Monadic bind for the Check monad. (>>=) :: m a -> (a -> m b) -> m b Fail x >>= f = Fail x Pass x >>= f = f x """ def bind_impl(x): if is_fail(x): return x if is_pass(x): return f(x.value) raise ValueError('Check has to be of type Pass | Fail.') return bind_impl def compose(f, g): """ Kleisli composition of two (Check-)monadic functions f and g. (>=>) :: (a -> m b) -> (b -> m c) -> (a -> m c) """ def compose_impl(x): return bind(g)(f(x)) return compose_impl def compose_many(*fs): """ Reduces a variable number of functions with composition. Same as repeatedly calling `compose` on pairs. """ return reduce(compose, fs) def lift(f, message): """ Lifts a boolean function into the realm of the Check monad. lift :: (a -> bool) -> String -> (a -> Check a) """ def lift_impl(x): if f(x): return return_(x) return Fail(message) return lift_impl
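A short usage sketch of the module above: lift plain predicates into Check-returning validators and chain them with Kleisli composition, so the first failure short-circuits the rest.

is_positive = lift(lambda x: x > 0, 'must be positive')
is_even = lift(lambda x: x % 2 == 0, 'must be even')
validate = compose_many(is_positive, is_even)

ok = validate(4)      # Pass(4): both checks ran
bad = validate(-3)    # Fail('must be positive'): is_even is never called
assert is_pass(ok) and ok.value == 4
assert is_fail(bad) and bad.message == 'must be positive'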
2B5/ia-3B5
module3/preprocessing/errorCorrect.py
Python
mit
232
0.038793
from textblob import TextBlob, Word


def correct(text):
    t = TextBlob(text)
    return str(t.correct())


def spellcheck(text):
    # NOTE: checks a fixed sample list rather than the `text` argument.
    txt = ["She", "is", "mw", "moom"]
    for w in txt:
        word = Word(w)
        print(word.spellcheck())
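For reference, TextBlob's documented behaviour for the two calls used above (requires the textblob package plus its downloaded corpora; exact outputs vary with corpus version):

from textblob import TextBlob, Word

print(TextBlob("I havv goood speling").correct())  # roughly: "I have good spelling"
print(Word("falibility").spellcheck())             # [('fallibility', 1.0)]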
FRidh/python-acoustics
tests/test_power.py
Python
bsd-3-clause
637
0
from __future__ import division

import numpy as np
from numpy.testing import assert_almost_equal
import pytest

from acoustics.power import lw_iso3746


@pytest.mark.parametrize("background_noise, expected", [
    (79, 91.153934187),
    (83, 90.187405234),
    (88, 88.153934187),
])
def test_lw_iso3746(background_noise, expected):
    LpAi = np.array([90, 90, 90, 90])
    LpAiB = background_noise * np.ones(4)
    S = 10
    alpha = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
    surfaces = np.array([10, 10, 10, 10, 10, 10])
    calculated = lw_iso3746(LpAi, LpAiB, S, alpha, surfaces)
    assert_almost_equal(calculated, expected)
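The parametrisation above shows the computed sound power rising as the background level drops. That comes from the background-noise correction K1 in ISO 3746-style formulas; a hand check of just that term (a sketch: lw_iso3746 itself also applies the environmental correction and the 10*lg(S/S0) area term):

import numpy as np

def k1(delta_lpa):
    # delta_lpa: measured level minus background level, in dB
    return -10.0 * np.log10(1.0 - 10.0 ** (-0.1 * delta_lpa))

print(round(k1(11.0), 2))  # ~0.36 dB is subtracted from the measured level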
mavlyutovrus/interval_index
python/create_datasets.py
Python
apache-2.0
14,048
0.003702
import random
from numpy.random import normal, uniform
import numpy as np
import math
from heapq import heapify, heappush, heappop
import os

MIN = 0
MAX = 10000000
POINTS_COUNT = 1000000
QUERIES_COUNT = 200000

def save_dataset(filename, intervals, queries):
    intervals_copy = [value for value in intervals]
    queries_copy = [value for value in queries]
    random.shuffle(intervals_copy)
    random.shuffle(queries_copy)
    out = open(filename, "w")
    out.write(str(len(intervals_copy)) + "\n")
    for index in xrange(len(intervals_copy)):
        start, length = intervals_copy[index]
        out.write(str(start) + "\t" + str(start + length) + "\t" + str(index + 1) + "\n")
    out.write(str(len(queries_copy)) + "\n")
    for start, length in queries_copy:
        out.write(str(start) + "\t" + str(start + length) + "\n")
    out.close()

if 1:
    # chi_time_mem
    len_mean = 100
    len_stdev = 10
    intervals = []
    queries = []
    lengths = [length >= 0 and length or 0.0 for length in normal(len_mean, len_stdev, POINTS_COUN
T)] for point_index in xrange(POINTS_COUNT): start = random.random() * (MAX - MIN) + MIN length = lengths[point_index] intervals += [(start, length)] intervals.sort() overlappings = [] started = [] for start, length in intervals: while started: right_border = heappop(started) if right_border >= start: heappush(started, right_border) break overlappings += [len(started)] heappush(started, start + length) avg_overlapping = sum(overlappings) / float(len(overlappings)) print "avg overlapping", avg_overlapping QUERIES_COUNT_SPEC = 1000000 query_len_mean = 100 lengths = normal(query_len_mean, len_stdev, QUERIES_COUNT_SPEC) queries = [] for point_index in xrange(QUERIES_COUNT_SPEC): start = random.random() * (MAX - MIN) + MIN queries += [(start, lengths[point_index])] save_dataset("datasets/chi_time_mem_1M_100_1M_100.txt", intervals, queries) if 1: # query_len len_mean = 100 len_stdev = 10 query_len = 1 intervals = [] queries = [] lengths = [length >=0 and length or 0.0 for length in normal(len_mean, len_stdev, POINTS_COUNT)] for point_index in xrange(POINTS_COUNT): start = random.random() * (MAX - MIN) + MIN length = lengths[point_index] intervals += [(start, length)] intervals.sort() overlappings = [] started = [] for start, length in intervals: while started: right_border = heappop(started) if right_border >= start: heappush(started, right_border) break overlappings += [len(started)] heappush(started, start + length) avg_overlapping = sum(overlappings) / float(len(overlappings)) lengths = normal(100, 10, QUERIES_COUNT) DATASETS_COUNT = 30 query_length = 10 factor = math.exp(math.log(10000 / float(query_length) ) / (DATASETS_COUNT - 1)) for length_factor in xrange(DATASETS_COUNT): queries = [] for point_index in xrange(QUERIES_COUNT): start = random.random() * (MAX - MIN) + MIN queries += [(start, query_length)] save_dataset("datasets/query_len/dataset_query_len_%d.txt" % (query_length), intervals, queries) print query_length query_length = math.ceil(query_length * factor) if 0: # avg_overlapping queries = [] for query_index in xrange(QUERIES_COUNT): start = random.random() * (MAX - MIN) + MIN length = 100 queries += [(start, length)] len_mean = 1 max_len = 100000 DATASETS_COUNT = 30 factor = math.exp(math.log(max_len / float(len_mean) ) / (DATASETS_COUNT - 1)) while len_mean <= 100000: print "mean len:", len_mean if 1: intervals = [] lengths = [length >=0 and length or 0.0 for length in normal(len_mean, len_mean / 20.0, POINTS_COUNT)] if len_mean == 1: #here we want overlapping to be zero lengths = [0 for l in lengths] for interval_index in xrange(POINTS_COUNT): start = random.random() * (MAX - MIN) + MIN length = lengths[interval_index] intervals += [(start, length)] intervals.sort() overlappings = [] started = [] for start, length in intervals: while started: right_border = heappop(started) if right_border >= start: heappush(started, right_border) break overlappings += [len(started)] heappush(started, start + length) avg_overlapping = sum(overlappings) / float(len(overlappings)) print sum(overlappings) print "avg. 
overlapping", avg_overlapping save_dataset("../datasets/avg_overlapping/%f.txt" % (avg_overlapping), intervals, queries) len_mean = math.ceil(len_mean * factor) if 0: # avg_overlapping standard deviation queries = [] for query_index in xrange(QUERIES_COUNT): start = random.random() * (MAX - MIN) + MIN length = 100 queries += [(start, length)] len_mean = 10000 DATASETS_COUNT = 30 radius = 0 max_radius = len_mean delta = (max_radius - radius) / (float(DATASETS_COUNT - 1)) for _ in xrange(20): print "radius:", radius if 0: intervals = [] lengths = [length >=0 and length or 0.0 for length in uniform(len_mean - radius, len_mean + radius, POINTS_COUNT)] print min(lengths), lengths[:15] for interval_index in xrange(POINTS_COUNT): start = random.random() * (MAX - MIN) + MIN length = lengths[interval_index] intervals += [(start, length)] intervals.sort() overlappings = [] started = [] for start, length in intervals: while started: right_border = heappop(started) if right_border >= start: heappush(started, right_border) break overlappings += [len(started)] heappush(started, start + length) avg_overlapping = sum(overlappings) / float(len(overlappings)) print sum(overlappings) print "avg. overlapping", avg_overlapping save_dataset("../datasets/avg_overlapping_stdev/%f.txt" % (2 * radius), intervals, queries) radius += delta if 0: # different number of intervals intervals_counts = [10000] for _ in xrange(50): intervals_counts += [int(1.15 * intervals_counts[-1])] max_values = [counts for counts in intervals_counts] interval_length = 10 for dataset_index in xrange(len(intervals_counts)): intervals_count = intervals_counts[dataset_index] MAX = max_values[dataset_index] intervals = [] for _ in xrange(intervals_count): start = random.random() * MAX intervals += [(start, interval_length)] if intervals_count < 10000000: intervals.sort() overlappings = [] started = [] for start, length in intervals: while started: right_border = heappop(started) if right_border >= start: heappush(started, right_border) break overlappings += [len(started)] heappush(started, start + length) avg_overlapping = sum(overlappings) / float(len(overlappings)) print sum(overlappings) print "avg. overlapping", avg_overlapping queries = [] for query_index in xrange(QUERIES_COUNT): start = random.random() * MAX length = 1000 queries += [(start, length)] print "intervals_count", intervals_count
IdeaSolutionsOnline/ERP4R
core/objs/sr_mulher.py
Python
mit
2,051
0.040236
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = 'CVtek dev'
__credits__ = []
__version__ = "1.0"
__maintainer__ = "CVTek dev"
__status__ = "Development"
__model_name__ = 'sr_mulher.SRMulher'

import auth, base_models
from orm import *
from form import *


class SRMulher(Model, View):
    def __init__(self, **kargs):
        Model.__init__(self, **kargs)
        self.__name__ = 'sr_mulher'
        self.__title__ = 'Inscrição e Identificação da Mulher'
        self.__model_name__ = __model_name__
        self.__list_edit_mode__ = 'edit'
        # also sets the field shown in m2m widgets, regardless of the
        # description on the m2m field itself
        self.__get_options__ = ['nome']
        self.__order_by__ = 'sr_mulher.nome'

        # choice field with the health facility structure
        self.numero_inscricao = integer_field(view_order=1, name='Nº de Inscrição', size=40)
        self.nome = string_field(view_order=2, name='Nome Completo', size=70, onlist=True)
        self.data_nascimento = date_field(view_order=3, name='Data Nascimento', size=40, args='required', onlist=True)
        self.escolaridade = combo_field(view_order=4, name='Escolaridade', size=40, default='',
                                        options=[('analfabeta', 'Analfabeta'), ('primaria', 'Primária'),
                                                 ('secundaria', 'Secundária'), ('mais', 'Mais')], onlist=True)
        self.telefone = string_field(view_order=5, name='Telefone', size=40, onlist=True)
        self.endereco_familia = text_field(view_order=6, name='Endereço Familia', size=70, args="rows=30",
                                           onlist=False, search=False)
        self.endereco_actual = text_field(view_order=7, name='Endereço Fixo Actual', size=70, args="rows=30",
                                          onlist=False, search=False)
        self.observacoes = text_field(view_order=8, name='Observações', size=80, args="rows=30",
                                      onlist=False, search=False)
        self.estado = combo_field(view_order=9, name='Estado', size=40, default='active',
                                  options=[('active', 'Activo'), ('canceled', 'Cancelado')], onlist=True)