repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars)
|---|---|---|---|---|---|---|---|---|
Greymerk/python-rpg | src/entity/mobs/snake.py | Python | gpl-3.0 | 592 | 0.037162 |
'''
Created on 2013-05-16
@author: brian
'''
import pygame
from entity import Entity
from src.ai import task
from src.abilities import *
class Snake(Entity):
living = "snake"
dead = "gore"
def __init__(self, world):
Entity.__init__(self, world)
self.world = world
self.hostile = True
self.health = self.maxHealth = 15
self.ai.addAI(task.Flee(self))
self.ai.addAI(task.Cast(self))
self.ai.addAI(task.Pursue(self))
self.ai.addAI(task.Wander(self))
self.singular = 'a snake'
def equip(self):
self.abilities = [Ability(self, Ability.lookup["PoisonBolt"])]
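The snake above wires four AI tasks into one controller in priority order. The record's Entity and task classes live elsewhere in the repo, so here is a minimal, self-contained sketch of the same pattern (first task whose precondition holds wins); the Flee/Cast/Pursue/Wander names mirror the record, everything else is assumed rather than taken from python-rpg:

```python
# Minimal sketch of a priority-ordered AI task list (names from the record
# above; the classes themselves are hypothetical, not from python-rpg).
class Task(object):
    def __init__(self, name):
        self.name = name
    def should_run(self, entity):
        return True          # real tasks would test distance, health, etc.
    def run(self, entity):
        print("%s: %s" % (entity.singular, self.name))

class AIController(object):
    """First task whose should_run() passes wins; list order is priority."""
    def __init__(self):
        self.tasks = []
    def addAI(self, task):
        self.tasks.append(task)
    def update(self, entity):
        for task in self.tasks:
            if task.should_run(entity):
                task.run(entity)
                return

class Mob(object):
    def __init__(self, singular):
        self.singular = singular
        self.ai = AIController()

snake = Mob('a snake')
for name in ('Flee', 'Cast', 'Pursue', 'Wander'):
    snake.ai.addAI(Task(name))
snake.ai.update(snake)   # prints "a snake: Flee"
```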
benedictpaten/cactus | src/cactus/blast/upconvertCoordinates.py | Python | mit | 4,797 | 0.001876 |
#!/usr/bin/env python
from argparse import ArgumentParser
from collections import defaultdict
import sys
import os
from sonLib.bioio import cigarRead, cigarWrite, getTempFile, system
def getSequenceRanges(fa):
"""Get dict of (untrimmed header) -> [(start, non-inclusive end)] mappings
from a trimmed fasta."""
ret = defaultdict(list)
curSeq = ""
curHeader = None
curTrimmedStart = None
for line in fa:
line = line.strip()
if line == '':
continue
if line[0] == '>':
if curHeader is not None:
# Add previous seq info to dict
trimmedRange = (curTrimmedStart,
curTrimmedStart + len(curSeq))
untrimmedHeader = "|".join(curHeader.split("|")[:-1])
ret[untrimmedHeader].append(trimmedRange)
curHeader = line[1:].split()[0]
curTrimmedStart = int(curHeader.split('|')[-1])
curSeq = ""
else:
curSeq += line
if curHeader is not None:
# Add final seq info to dict
trimmedRange = (curTrimmedStart,
curTrimmedStart + len(curSeq))
untrimmedHeader = "|".join(curHeader.split("|")[:-1])
ret[untrimmedHeader].append(trimmedRange)
for key in ret.keys():
# Sort by range's start pos
ret[key] = sorted(ret[key], key=lambda x: x[0])
return ret
def validateRanges(seqRanges):
"""Fail if the given range dict contains overlapping ranges or if the
ranges aren't sorted.
"""
for seq, ranges in seqRanges.items():
for i, range in enumerate(ranges):
start = range[0]
if i - 1 >= 0:
range2 = ranges[i - 1]
assert start >= range2[1]
if i + 1 < len(ranges):
range2 = ranges[i + 1]
assert start < range2[0]
def sortCigarByContigAndPos(cigarPath, contigNum):
contigNameKey = 2 if contigNum == 1 else 6
startPosKey = 3 if contigNum == 1 else 7
tempFile = getTempFile()
system("sort -k %d,%d -k %d,%dn %s > %s" % (contigNameKey, contigNameKey, startPosKey, startPosKey, cigarPath, tempFile))
return tempFile
def upconvertCoords(cigarPath, fastaPath, contigNum, outputFile):
"""Convert the coordinates of the given alignment, so that the
alignment refers to a set of trimmed sequences originating from a
contig rather than to the contig itself."""
with open(fastaPath) as f:
seqRanges = getSequenceRanges(f)
validateRanges(seqRanges)
sortedCigarPath = sortCigarByContigAndPos(cigarPath, contigNum)
sortedCigarFile = open(sortedCigarPath)
currentContig = None
currentRangeIdx = None
currentRange = None
for alignment in cigarRead(sortedCigarFile):
# contig1 and contig2 are reversed in python api!!
contig = alignment.contig2 if contigNum == 1 else alignment.contig1
minPos = min(alignment.start2, alignment.end2) if contigNum == 1 else min(alignment.start1, alignment.end1)
maxPos = max(alignment.start2, alignment.end2) if contigNum == 1 else max(alignment.start1, alignment.end1)
if contig in seqRanges:
if contig != currentContig:
currentContig = contig
currentRangeIdx = 0
currentRange = seqRanges[contig][0]
while (minPos >= currentRange[1] or minPos < currentRange[0]) and currentRangeIdx < len(seqRanges[contig]) - 1:
currentRangeIdx += 1
currentRange = seqRanges[contig][currentRangeIdx]
if currentRange[0] <= minPos < currentRange[1]:
if maxPos - 1 > currentRange[1]:
raise RuntimeError("alignment on %s:%d-
|
%d crosses "
"trimmed sequence boundary" %\
(contig,
minPos,
maxPos))
if contigNum == 1:
alignment.start2 -= currentRange[0]
alignment.end2 -= currentRange[0]
alignment.contig2 = contig + ("|%d" % currentRange[0])
else:
alignment.start1 -= currentRange[0]
alignment.end1 -= currentRange[0]
alignment.contig1 = contig + ("|%d" % currentRange[0])
else:
raise RuntimeError("No trimmed sequence containing alignment "
"on %s:%d-%d" % (contig,
minPos,
maxPos))
cigarWrite(outputFile, alignment, False)
os.remove(sortedCigarPath)
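A toy run of `getSequenceRanges` (assuming the function defined above is in scope): trimmed FASTA headers carry the original start offset after the last `|`, and the function groups the trimmed ranges back under their untrimmed header.

```python
from io import StringIO

# Three trimmed sequences: two slices of chr1 (starting at 0 and 100) and
# one slice of chr2 (starting at 5). Range lengths come from the sequences.
fa = StringIO(u">chr1|0\nACGT\n>chr1|100\nGGGG\n>chr2|5\nTT\n")
ranges = getSequenceRanges(fa)
print(dict(ranges))
# {'chr1': [(0, 4), (100, 104)], 'chr2': [(5, 7)]}
```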
EricssonResearch/iot-framework-engine | semantic-adapter/test_pubsub/test_publisher.py | Python | apache-2.0 | 1,498 | 0.001335 |
from lib import broker
__author__ = 'ehonlia'
import pika
import logging
logging.basicConfig()
connection = pika.BlockingConnection(pika.ConnectionParameters(host=broker.HOST))
channel = connection.channel()
channel.exchange_declare(exchange=broker.STREAM_EXCHANGE, type=broker.EXCHANGE_TYPE)
message = '''
{
"_index": "sensorcloud",
"_source": {
"polling": false,
"min_val": "0",
"nr_subscribers": 0,
"uri": "",
"name": "[ER Day 2013] Battery North",
"resource": {
"resource_type": "",
"uuid": ""
},
"active": true,
"subscribers": [],
"user_ranking": {
"average": 60,
"nr_rankings": 1
},
"unit": "",
"quality": 1,
"history_size": 6995,
"polling_freq": 0,
"creation_date": "2014-01-09",
"private": false,
"parser": "",
"last_updated": "2014-01-21T16:26:50.000",
"user_id": "user",
"location": {
"lon": 17.949467700000014,
"lat": 59.40325599999999
},
"type": "battery level",
"accuracy": "",
"description": "battery level of the mote on the North pipe (not leaky)",
"data_type": "application/json
|
",
"tags": "battery c
|
harge",
"max_val": "255"
},
"_id": "abcdef",
"_type": "stream",
"_score": 1
}
'''
channel.basic_publish(exchange=broker.STREAM_EXCHANGE,
routing_key=broker.STREAM_ROUTING_KEY,
body=message)
print " [x] Sent %r:%r" % (broker.STREAM_ROUTING_KEY, message)
connection.close()
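For completeness, a matching consumer sketch (not part of the original repo), written against the same pika 0.9-era API the publisher uses (`exchange_declare(type=...)`) and assuming the same `broker` constants: HOST, STREAM_EXCHANGE, EXCHANGE_TYPE, STREAM_ROUTING_KEY.

```python
import pika
from lib import broker

connection = pika.BlockingConnection(pika.ConnectionParameters(host=broker.HOST))
channel = connection.channel()
channel.exchange_declare(exchange=broker.STREAM_EXCHANGE, type=broker.EXCHANGE_TYPE)
# Anonymous exclusive queue that lives only as long as this consumer.
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange=broker.STREAM_EXCHANGE, queue=queue_name,
                   routing_key=broker.STREAM_ROUTING_KEY)

def callback(ch, method, properties, body):
    print(" [x] Received %r" % (body,))

channel.basic_consume(callback, queue=queue_name, no_ack=True)
channel.start_consuming()
```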
claudelee/Wi-FiTestSuite-UCC | python/myutils.py | Python | isc | 97,701 | 0.005466 |
###################################################################
#
# Copyright (c) 2014 Wi-Fi Alliance
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
# USE OR PERFORMANCE OF THIS SOFTWARE.
#
###################################################################
from socket import *
from time import gmtime, strftime
import thread, time, Queue, os
import sys, time
from select import select
import logging
import re
import ctypes
import random
import HTML
import json, io, re
import string
import xml.dom.minidom
import threading
from xml.dom.minidom import Document
from xml.dom.minidom import Node
from XMLLogger import XMLLogger
import math
from datetime import datetime
from random import randrange
from xml.dom.minidom import Node
from difflib import SequenceMatcher
VERSION = "9.0.0"
conntable = {}
retValueTable = {}
DisplayNameTable = {}
streamSendResultArray = []
streamRecvResultArray = []
streamInfoArray = []
lhs = []
rhs = []
oper = []
boolOp = []
oplist = []
runningPhase = '1'
testRunning = 0
threadCount = 0
resultPrinted = 0
ifcondBit = 1
ifCondBit = 1
iDNB = 0
iINV = 0
RTPCount = 1
socktimeout = 0
#default socket time out in seconds
deftimeout = 600
#default command file path
MasterTestInfo="\MasterTestInfo
|
.xml"
InitEnv = "\InitEnv.txt"
uccPath = '..\\..\\cmds'
DUTFeatureInfoFile = "./log/DUTFeatureInfo.html"
doc = ""
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
FOREGROUND_BLUE = 0x01 # text color contains blue.
FOREGROUND_GREEN = 0x02 # text color contains green.
FOREGROUND_RED = 0x04 # text color contains red.
FOREGROUND_INTENSITY = 0x08 # text color is intensified.
#Define extra colours
FOREGROUND_WHITE = FOREGROUND_RED | FOREGROUND_BLUE | FOREGROUND_GREEN
FOREGROUND_YELLOW = FOREGROUND_RED | FOREGROUND_GREEN
FOREGROUND_CYAN = FOREGROUND_BLUE | FOREGROUND_GREEN
FOREGROUND_MAGENTA = FOREGROUND_RED | FOREGROUND_BLUE
#FOREGROUND_WHITE = FOREGROUND_GREEN | FOREGROUND_RED --> this is yellow.
BACKGROUND_BLUE = 0x10 # background color contains blue.
BACKGROUND_GREEN = 0x20 # background color contains green.
BACKGROUND_RED = 0x40 # background color contains red.
BACKGROUND_INTENSITY = 0x80 # background color is intensified.
BACKGROUND_WHITE = BACKGROUND_RED | BACKGROUND_BLUE | BACKGROUND_GREEN
BACKGROUND_YELLOW = BACKGROUND_RED | BACKGROUND_GREEN
BACKGROUND_CYAN = BACKGROUND_BLUE | BACKGROUND_GREEN
BACKGROUND_MAGENTA = BACKGROUND_RED | BACKGROUND_BLUE
std_out_handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
#TMS response packet
class TMSResponse:
#Init variables
def __init__(self, TestResult="N/A", Mode="Sigma", DutParticipantName="Unknown", PrimaryTestbedParticipantName="Unknown" ):
self.TmsEventId =""
self.TestCaseId = ""
self.Mode = Mode
self.Dut = {'company':"", 'model':"", 'firmware':"", 'Category' : "", 'VendorDeviceId' : ""}
self.PrimaryTestbed = {'company':"", 'model':"", 'firmware':"", 'Category' : "", 'VendorDeviceId' : ""}
self.SupplementalTestbeds = []
self.TestResult = TestResult
self.TimeStamp = ""
self.LogFileName = ""
self.ProgramName = ""
self.DutParticipantName = DutParticipantName
self.PrimaryTestbedParticipantName = PrimaryTestbedParticipantName
def __str__(self):
return("\n Test Event ID = [%s] Prog Name = [%s] Test Case = [%s] Dut Name =[%s] Model Number =[%s] Test Result =[%s]" % (self.TmsEventId,self.ProgramName,self.TestCaseId,self.dutName,self.dutModeNumber, self.testResult))
#func to get class to dict
def asDict(self):
return self.__dict__
def Search_MasterTestInfo(self, testID, tag):
"""
Finds the value of given tag in master XML file of Testcase Info (from InitEnv)
Parameters
----------
testID : str
tag : tuple of str
Returns
-------
Tag Value (as per XML file) : str
"""
global MasterTestInfo,doc,uccPath
result=""
doc = xml.dom.minidom.parse(uccPath + MasterTestInfo)
for node in doc.getElementsByTagName(testID):
L = node.getElementsByTagName(tag)
for node2 in L:
for node3 in node2.childNodes:
if node3.nodeType == Node.TEXT_NODE:
result = node3.nodeValue
return result
return result
def writeTMSJson(self, logLoc, logTime):
"""Write JSON for TMS -> grep log file and look for Version Info"""
jsonFname="%s/tms_%s.json" %( logLoc , self.TestCaseId)
convertedTime = time.strptime(logTime, "%b-%d-%Y__%H-%M-%S")
self.TimeStamp = time.strftime('%Y-%m-%dT%H:%M:%SZ', convertedTime)
try :
primaryTB = self.Search_MasterTestInfo(self.TestCaseId, "PrimaryTestbed")
except :
#exception
primaryTB ="n/a"
BulkStorageServer = ""
tmsPATH = './TmsClient.conf'
if(os.path.isfile(tmsPATH)):
with open(tmsPATH, "r") as f:
for line in f:
if re.search(r"TmsEventId=", line):
pos = line.index('=') + 1
str = line[pos:].rstrip('\r\n')
self.TmsEventId = str
if re.search(r"TestbedParticipantName=", line):
pos = line.index('=') + 1
str = line[pos:].rstrip('\r\n')
if primaryTB != "" :
self.PrimaryTestbedParticipantName = str
else:
self.PrimaryTestbedParticipantName = ""
if re.search(r"DutParticipantName=", line):
pos = line.index('=') + 1
str = line[pos:].rstrip('\r\n')
self.DutParticipantName = str
if re.search(r"BulkStorageServer=", line):
pos = line.index('=') + 1
str = line[pos:].rstrip('\r\n')
BulkStorageServer = str
if self.Dut.get('VendorDeviceId') != "":
if self.PrimaryTestbed.get('VendorDeviceId') != "":
self.LogFileName = BulkStorageServer + "/" + self.TmsEventId + "/" + self.Dut.get('VendorDeviceId') + "/" + self.PrimaryTestbed.get('VendorDeviceId') + "/" + self.TestCaseId + "/" + logTime + ".zip"
else:
self.LogFileName = BulkStorageServer + "/" + self.TmsEventId + "/" + self.Dut.get('VendorDeviceId') + "/" + self.TestCaseId + "/" + logTime + ".zip"
else:
self.LogFileName = BulkStorageServer + "/" + self.TmsEventId + "/" + self.TestCaseId + "/" + logTime + ".zip"
tmsFile = open(jsonFname, "w")
tmsDict = self.asDict()
if primaryTB == "" :
del tmsDict['PrimaryTestbed']
del tmsDict['PrimaryTestbedParticipantName']
del tmsDict['SupplementalTestbeds']
else:
line = self.Search_MasterTestInfo(self.TestCaseId, "STA")
sta_list = line.split(',')
if len(sta_list) <= 1:
del tmsDict['SupplementalTestbeds']
TmsFinalResult = {"TmsTestResult" : tmsDict}
json.dump(TmsFinalResult, tmsFile, indent=4)
tmsFile.close()
#func to get device_get_info capi resonse
def setDutDeviceInfo(self, displayname, response):
category = self.Search_MasterTestInfo(self.TestCaseId, "DUT_CAT")
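The record is truncated at the dataset's suffix limit. As a hypothetical usage sketch (not from the original file, and assuming the TMSResponse class above is in scope): build a result object and serialize it the same way writeTMSJson() does, via asDict().

```python
import json

# Hypothetical values; the real fields are filled in by the test harness.
r = TMSResponse(TestResult="PASS", Mode="Sigma", DutParticipantName="VendorA")
r.TestCaseId = "WPA2-5.2.1"   # illustrative test case id
print(json.dumps({"TmsTestResult": r.asDict()}, indent=4))
```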
sklam/numba | numba/cuda/tests/cudapy/test_nondet.py | Python | bsd-2-clause | 1,378 | 0.001451 |
import numpy as np
from numba import cuda, float32
from numba.cuda.testing import unittest, CUDATestCase
def generate_input(n):
A = np.array(np.arange(n * n).reshape(n, n), dtype=np.float32)
B = np.array(np.arange(n) + 0, dtype=A.dtype)
return A, B
class TestCudaNonDet(CUDATestCase):
def test_for_pre(self):
"""Test issue with loop not running due to bad sign-extension at the for loop
precondition.
"""
@cuda.jit(argtypes=[float32[:, :], float32[:, :], float32[:]])
def diagproduct(c, a, b):
startX, startY = cuda.grid(2)
gridX = cuda.gridDim.x * cuda.blockDim.x
gridY = cuda.gridDim.y * cuda.blockDim.y
height = c.shape[0]
width = c.shape[1]
for x in range(startX, width, (gridX)):
for y in range(startY, height, (gridY)):
c[y, x] = a[y, x] * b[x]
N = 8
A, B = generate_input(N)
F = np.empty(A.shape, dtype=A.dtype)
blockdim = (32, 8)
griddim = (1, 1)
dA = cuda.to_device(A)
dB = cuda.to_device(B)
dF = cuda.to_device(F, copy=False)
diagproduct[griddim, blockdim](dF, dA, dB)
E = np.dot(A, np.diag(B))
np.testing.assert_array_almost_equal(dF.copy_to_host(), E)
if __name__ == '__main__':
unittest.main()
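The kernel's grid-stride loops compute c[y, x] = a[y, x] * b[x], i.e. A scaled column-wise by B, which is exactly A·diag(B), the reference the test checks with np.dot. The same identity as a self-contained NumPy snippet:

```python
import numpy as np

n = 8
A = np.arange(n * n, dtype=np.float32).reshape(n, n)
B = np.arange(n, dtype=np.float32)
# Broadcasting B across rows is the dense equivalent of A . diag(B).
assert np.allclose(A * B[np.newaxis, :], np.dot(A, np.diag(B)))
```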
groovey/Documentation | sphinx/demo_docs/source/conf.py | Python | mit | 8,213 | 0.007062 |
# -*- coding: utf-8 -*-
#
# Sphinx RTD theme demo documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 3 11:56:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('./test_py_module'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Math
mathjax_path = "http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sphinx RTD theme demo'
copyright = u'2013, Dave Snider'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# 'sticky_navigation' : True # Set to False to disable the sticky nav while scrolling.
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["../.."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SphinxRTDthemedemodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'SphinxRTDthemedemo.tex', u'Sphinx RTD theme demo Documentation',
u'Dave Snider', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sphinxrtdthemedemo', u'Sphinx RTD theme demo Documentation',
[u'Dave Snider'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'SphinxRTDthemedemo', u'Sphinx RTD theme demo Documentation',
u'Dave Snider', 'SphinxRTDthemedemo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
Netflix/security_monkey | scripts/secmonkey_role_setup.py | Python | apache-2.0 | 7,737 | 0.00517 |
#!/usr/bin/env python
# Copyright 2014 Rocket-Internet
# Luca Bruno <luca.bruno@rocket-internet.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SecurityMonkey AWS role provisioning script
Grab credentials from ~/.boto (or other standard credentials sources).
Optionally accept "profile_name" as CLI parameter.
"""
import sys, json
import urllib
import boto
# FILL THIS IN
# Supervision account that can assume monitoring role
secmonkey_arn = 'arn:aws:iam::<awsaccountnumber>:role/SecurityMonkeyInstanceProfile'
trust_relationship = \
'''
{
"Version": "2008-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "%s"
},
"Action": "sts:AssumeRole"
}
]
}
'''
# Role with restricted security policy (list/get only)
role_name = 'SecurityMonkey'
role_policy_name = 'SecurityMonkeyPolicy'
policy = \
'''
{
"Statement": [
{
"Action": [
"acm:describecertificate",
"acm:listcertificates",
"cloudtrail:describetrails",
"cloudtrail:gettrailstatus",
"config:describecon
|
figrules",
"config:describ
|
econfigurationrecorders",
"directconnect:describeconnections",
"ec2:describeaddresses",
"ec2:describedhcpoptions",
"ec2:describeflowlogs",
"ec2:describeimages",
"ec2:describeimageattribute",
"ec2:describeinstances",
"ec2:describeinternetgateways",
"ec2:describekeypairs",
"ec2:describenatgateways",
"ec2:describenetworkacls",
"ec2:describenetworkinterfaces",
"ec2:describeregions",
"ec2:describeroutetables",
"ec2:describesecuritygroups",
"ec2:describesnapshots",
"ec2:describesnapshotattribute",
"ec2:describesubnets",
"ec2:describetags",
"ec2:describevolumes",
"ec2:describevpcendpoints",
"ec2:describevpcpeeringconnections",
"ec2:describevpcs",
"ec2:describevpnconnections",
"ec2:describevpngateways",
"elasticloadbalancing:describeloadbalancerattributes",
"elasticloadbalancing:describeloadbalancerpolicies",
"elasticloadbalancing:describeloadbalancers",
"elasticloadbalancing:describelisteners",
"elasticloadbalancing:describerules",
"elasticloadbalancing:describesslpolicies",
"elasticloadbalancing:describetags",
"elasticloadbalancing:describetargetgroups",
"elasticloadbalancing:describetargetgroupattributes",
"elasticloadbalancing:describetargethealth",
"es:describeelasticsearchdomainconfig",
"es:listdomainnames",
"iam:getaccesskeylastused",
"iam:getgroup",
"iam:getgrouppolicy",
"iam:getloginprofile",
"iam:getpolicyversion",
"iam:getrole",
"iam:getrolepolicy",
"iam:getservercertificate",
"iam:getuser",
"iam:getuserpolicy",
"iam:listaccesskeys",
"iam:listattachedgrouppolicies",
"iam:listattachedrolepolicies",
"iam:listattacheduserpolicies",
"iam:listentitiesforpolicy",
"iam:listgrouppolicies",
"iam:listgroups",
"iam:listinstanceprofilesforrole",
"iam:listmfadevices",
"iam:listpolicies",
"iam:listrolepolicies",
"iam:listroles",
"iam:listroletags",
"iam:listsamlproviders",
"iam:listservercertificates",
"iam:listsigningcertificates",
"iam:listuserpolicies",
"iam:listusers",
"kms:describekey",
"kms:getkeypolicy",
"kms:getkeyrotationstatus",
"kms:listaliases",
"kms:listgrants",
"kms:listkeypolicies",
"kms:listkeys",
"lambda:listfunctions",
"lambda:getfunctionconfiguration",
"lambda:getpolicy",
"lambda:listaliases",
"lambda:listeventsourcemappings",
"lambda:listtags",
"lambda:listversionsbyfunction",
"lambda:listfunctions",
"rds:describedbclusters",
"rds:describedbclustersnapshots",
"rds:describedbinstances",
"rds:describedbsecuritygroups",
"rds:describedbsnapshots",
"rds:describedbsnapshotattributes",
"rds:describedbsubnetgroups",
"redshift:describeclusters",
"route53:listhostedzones",
"route53:listresourcerecordsets",
"route53domains:listdomains",
"route53domains:getdomaindetail",
"s3:getbucketacl",
"s3:getbucketlocation",
"s3:getbucketlogging",
"s3:getbucketpolicy",
"s3:getbuckettagging",
"s3:getbucketversioning",
"s3:getlifecycleconfiguration",
"s3:listallmybuckets",
"ses:getidentityverificationattributes",
"ses:listidentities",
"ses:listverifiedemailaddresses",
"ses:sendemail",
"sns:gettopicattributes",
"sns:listsubscriptionsbytopic",
"sns:listtopics",
"sqs:getqueueattributes",
"sqs:listqueues",
"sqs:listqueuetags",
"sqs:listdeadlettersourcequeues"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
'''
def main(profile = None):
# Sanitize JSON
assume_policy = json.dumps(json.loads(trust_relationship % secmonkey_arn))
security_policy = json.dumps(json.loads(policy))
# Connect to IAM
(role_exist, current_policy) = (False, "")
try:
iam = boto.connect_iam(profile_name = profile)
except boto.exception.NoAuthHandlerFound:
sys.exit("Authentication failed, please check your credentials under ~/.boto")
# Check if role already exists
rlist = iam.list_roles()
for r in rlist['list_roles_response']['list_roles_result']['roles']:
if r['role_name'] == role_name:
role_exist = True
current_policy = json.loads(urllib.unquote(r['assume_role_policy_document']))
for p in current_policy['Statement']:
if p['Action'] == 'sts:AssumeRole':
if secmonkey_arn in p['Principal']['AWS'] :
# Already ok
sys.exit('Role "%s" already configured, not touching it.' % role_name)
else:
# Add another monitoring account
new_policy = [secmonkey_arn]
new_policy.extend(p['Principal']['AWS'])
p['Principal']['AWS'] = new_policy
assume_policy = json.dumps(current_policy)
# Add SecurityMonkey monitoring role and link it to supervisor ARN
if not role_exist:
role = iam.create_role(role_name, assume_policy)
else:
role = iam.update_assume_role_policy(role_name, assume_policy)
# Add our own role policy
iam.put_role_policy(role_name, role_policy_name, security_policy)
print('Added role "%s", linked to ARN "%s".' % (role_name, secmonkey_arn))
if __name__ == "__main__":
profile = None
if len(sys.argv) >= 2:
profile = sys.argv[1]
main(profile)
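The script targets the long-deprecated boto2 IAM client. Below is a rough boto3 equivalent of the create/update path, as a sketch only (it reuses the `role_name`, `role_policy_name`, `assume_policy`, and `security_policy` values defined above; error handling trimmed):

```python
import boto3
from botocore.exceptions import ClientError

iam = boto3.client("iam")
try:
    iam.create_role(RoleName=role_name,
                    AssumeRolePolicyDocument=assume_policy)
except ClientError as e:
    if e.response["Error"]["Code"] != "EntityAlreadyExists":
        raise
    # Role exists: update its trust policy instead.
    iam.update_assume_role_policy(RoleName=role_name,
                                  PolicyDocument=assume_policy)
iam.put_role_policy(RoleName=role_name,
                    PolicyName=role_policy_name,
                    PolicyDocument=security_policy)
```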
iFighting/flask | tests/test_templating.py | Python | bsd-3-clause | 11,202 | 0.005981 |
# -*- coding: utf-8 -*-
"""
tests.templating
~~~~~~~~~~~~~~~~
Template functionality
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
import logging
from jinja2 import TemplateNotFound
def test_context_processing():
app = flask.Flask(__name__)
@app.context_processor
def context_processor():
return {'injected_value': 42}
@app.route('/')
def index():
return flask.render_template('context_template.html', value=23)
rv = app.test_client().get('/')
assert rv.data == b'<p>23|42'
def test_original_win():
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template_string('{{ config }}', config=42)
rv = app.test_client().get('/')
assert rv.data == b'42'
def test_request_less_rendering():
app = flask.Flask(__name__)
app.config['WORLD_NAME'] = 'Special World'
@app.context_processor
def context_processor():
return dict(foo=42)
with app.app_context():
rv = flask.render_template_string('Hello {{ config.WORLD_NAME }} '
'{{ foo }}')
assert rv == 'Hello Special World 42'
def test_standard_context():
app = flask.Flask(__name__)
app.secret_key = 'development key'
@app.route('/')
def index():
flask.g.foo = 23
flask.session['test'] = 'aha'
return flask.render_template_string('''
{{ request.args.foo }}
{{ g.foo }}
{{ config.DEBUG }}
{{ session.test }}
''')
rv = app.test_client().get('/?foo=42')
assert rv.data.split() == [b'42', b'23', b'False', b'aha']
def test_escaping():
text = '<p>Hello World!'
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('escaping_template.html', text=text,
html=flask.Markup(text))
lines = app.test_client().get('/').data.splitlines()
assert lines == [
b'&lt;p&gt;Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'&lt;p&gt;Hello World!',
b'<p>Hello World!'
]
def test_no_escaping():
app = flask.Flask(__name__)
with app.test_request_context():
assert flask.render_template_string(
'{{ foo }}', foo='<test>') == '&lt;test&gt;'
assert flask.render_template('mail.txt', foo='<test>') == \
'<test> Mail'
def test_macros():
app = flask.Flask(__name__)
with app.test_request_context():
macro = flask.get_template_attribute('_macro.html', 'hello')
assert macro('World') == 'Hello World!'
def test_template_filter():
app = flask.Flask(__name__)
@app.template_filter()
def my_reverse(s):
return s[::-1]
assert 'my_reverse' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['my_reverse'] == my_reverse
assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
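The same filter registration also works without a template file; a small self-contained sketch (not part of the test suite) using render_template_string:

```python
import flask

app = flask.Flask(__name__)

@app.template_filter('strrev')
def my_reverse(s):
    return s[::-1]

# render_template_string needs an application/request context.
with app.test_request_context():
    assert flask.render_template_string('{{ "abcd"|strrev }}') == 'dcba'
```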
def test_add_template_filter():
app = flask.Flask(__name__)
def my_reverse(s):
return s[::-1]
app.add_template_filter(my_reverse)
assert 'my_reverse' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['my_reverse'] == my_reverse
assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_template_filter_with_name():
app = flask.Flask(__name__)
@app.template_filter('strrev')
def my_reverse(s):
return s[::-1]
assert 'strrev' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['strrev'] == my_reverse
assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_add_template_filter_with_name():
app = flask.Flask(__name__)
def my_reverse(s):
return s[::-1]
app.add_template_filter(my_reverse, 'strrev')
assert 'strrev' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['strrev'] == my_reverse
assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_template_filter_with_template():
app = flask.Flask(__name__)
@app.template_filter()
def super_reverse(s):
return s[::-1]
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_add_template_filter_with_template():
app = flask.Flask(__name__)
def super_reverse(s):
return s[::-1]
app.add_template_filter(super_reverse)
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_filter_with_name_and_template():
app = flask.Flask(__name__)
@app.template_filter('super_reverse')
def my_reverse(s):
return s[::-1]
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_add_template_filter_with_name_and_template():
app = flask.Flask(__name__)
def my_reverse(s):
return s[::-1]
app.add_template_filter(my_reverse, 'super_reverse')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_test():
app = flask.Flask(__name__)
@app.template_test()
def boolean(value):
return isinstance(value, bool)
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == boolean
assert app.jinja_env.tests['boolean'](False)
def test_add_template_test():
app = flask.Flask(__name__)
def boolean(value):
return isinstance(value, bool)
app.add_template_test(boolean)
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == boolean
assert app.jinja_env.tests['boolean'](False)
def test_template_test_with_name():
app = flask.Flask(__name__)
@app.template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == is_boolean
assert app.jinja_env.tests['boolean'](False)
def test_add_template_test_with_name():
app = flask.Flask(__name__)
def is_boolean(value):
return isinstance(value, bool)
app.add_template_test(is_boolean, 'boolean')
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == is_boolean
assert app.jinja_env.tests['boolean'](False)
def test_template_test_with_template():
app = flask.Flask(__name__)
@app.template_test()
def boolean(value):
return isinstance(value, bool)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_test_with_template():
app = flask.Flask(__name__)
def boolean(value):
return isinstance(value, bool)
app.add_template_test(boolean)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_template_test_with_name_and_template():
app = flask.Flask(__name__)
@app.template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_test_with_name_and_template():
app = flask.Flask(__name__)
def is_boolean(value):
return isinstance(value, bool)
app.add_template_test(is_boolean, 'boolean')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def t
USStateDept/FPA_Core | openspending/forum/utils/decorators.py | Python | agpl-3.0 | 1,481 | 0.002026 |
# -*- coding: utf-8 -*-
"""
flaskbb.utils.decorators
~~~~~~~~~~~~~~~~~~~~~~~~
A place for our decorators.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from functools import wraps
from flask import abort
from flask_login import current_user
from openspending.auth.perms import check_perm
from openspending.auth import *
def can_access_forum(func):
"""
If you are logged in you can view the forum
"""
@wraps(func)
def decorated(*args, **kwargs):
if not is_authenticated(current_user):
abort(403)
return func(*args, **kwargs)
# forum_id = kwargs['forum_id'] if 'forum_id' in kwargs else args[1]
# from openspending.forum.forum.models import Forum
# user_forums = Forum.query.all()
# if len(user_forums) < 1:
# abort(403)
# return func(*args, **kwargs)
return decorated
def can_access_topic(func):
@wraps(func)
def decorated(*args, **kwargs):
if not is_authenticated(current_user):
abort(403)
return func(*args, **kwargs)
# topic_id = kwargs['topic_id'] if 'topic_id' in kwargs else args[1]
# from openspending.forum.forum.models import Forum, Topic
# topic = Topic.query.filter_by(id=topic_id).first()
# user_forums = Forum.query.all()
# if len(user_forums) < 1:
# abort(403)
# return func(*args, **kwargs)
return decorated
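Hypothetical usage sketch (the blueprint and view are illustrative, not from the original app): the decorators return a 403 to anonymous users before the view runs. The @wraps(func) added above also matters for Flask routing: without it, every wrapped view would share the endpoint name `decorated` and registrations would collide.

```python
from flask import Blueprint

forum = Blueprint('forum', __name__)

@forum.route('/forum/<int:forum_id>')
@can_access_forum
def view_forum(forum_id):
    return 'forum %d' % forum_id
```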
yukondude/Twempest | build-readme.py | Python | gpl-3.0 | 1,279 | 0.002346 |
#!/usr/bin/env python
""" (Re)build the README.md file from README-template.md.
"""
# This file is part of Twempest. Copyright 2018 Dave Rogers <info@yukondude.com>. Licensed under the GNU General Public
# License, version 3. Refer to the attached LICENSE file or see <http://www.gnu.org/licenses/> for details.
import datetime
import os
import subprocess
import twempest
if __name__ == '__main__':
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "README-template.md"), "r") as f:
readme = f.read()
today = datetime.date.today().strftime("%B %-d, %Y")
version = twempest.__version__
readme = readme.replace("@@TODAY@@", today)
readme = readme.replace("@@VERSION@@", version)
help_text = subprocess.check_output(["twempest", "--help"]).decode("utf-8").strip()
readme = readme.replace("@@HELPTEXT@@", help_text)
with open("twempest.config.sample", 'r') as f:
config_text = f.read().strip()
readme = readme.replace("@@CONFIGTEXT@@", config_text)
with open("twempest.template.sample", 'r') as f:
config_text = f.read().strip()
readme = readme.replace("@@TEMPLATETEXT@@", config_text)
with open(os.path.join(here, "README.md"), "w") as f:
f.write(readme)
openstack/python-heatclient | heatclient/osc/v1/resource_type.py | Python | apache-2.0 | 4,711 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Orchestrati
|
on v1 resource type implementations"""
import logging
from osc_lib.command import command
from osc_lib import exceptions as exc
from osc_lib.i18n import _
import six
from heatclient.common import format_utils
from heatclient.common import utils as heat_utils
from heatclient import exc as heat_exc
class ResourceTypeShow(format_utils.YamlFormat):
"""Show details and optionally generate a template for a resource type."""
log = logging.getLogger(__name__ + ".ResourceTypeShow")
def get_parser(self, prog_name):
parser = super(ResourceTypeShow,
self).get_parser(prog_name)
parser.add_argument(
'resource_type',
metavar='<resource-type>',
help=_('Resource type to show details for'),
)
parser.add_argument(
'--template-type',
metavar='<template-type>',
help=_('Optional template type to generate, hot or cfn')
)
parser.add_argument(
'--long',
default=False,
action='store_true',
help=_('Show resource type with corresponding description.')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
if parsed_args.template_type is not None and parsed_args.long:
msg = _('Cannot use --template-type and --long at the same time.')
raise exc.CommandError(msg)
heat_client = self.app.client_manager.orchestration
return _show_resourcetype(heat_client, parsed_args)
def _show_resourcetype(heat_client, parsed_args):
try:
if parsed_args.template_type:
template_type = parsed_args.template_type.lower()
if template_type not in ('hot', 'cfn'):
raise exc.CommandError(
_('Template type invalid: %s') % parsed_args.template_type)
fields = {'resource_type': parsed_args.resource_type,
'template_type': template_type}
data = heat_client.resource_types.generate_template(**fields)
else:
data = heat_client.resource_types.get(parsed_args.resource_type,
parsed_args.long)
except heat_exc.HTTPNotFound:
raise exc.CommandError(
_('Resource type not found: %s') % parsed_args.resource_type)
rows = list(six.itervalues(data))
columns = list(six.iterkeys(data))
return columns, rows
class ResourceTypeList(command.Lister):
"""List resource types."""
log = logging.getLogger(__name__ + '.ResourceTypeList')
def get_parser(self, prog_name):
parser = super(ResourceTypeList,
self).get_parser(prog_name)
parser.add_argument(
'--filter',
dest='filter',
metavar='<key=value>',
help=_('Filter parameters to apply on returned resource types. '
'This can be specified multiple times. It can be any of '
'name, version or support_status'),
action='append'
)
parser.add_argument(
'--long',
default=False,
action='store_true',
help=_('Show resource types with corresponding description of '
'each resource type.')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
heat_client = self.app.client_manager.orchestration
return _list_resourcetypes(heat_client, parsed_args)
def _list_resourcetypes(heat_client, parsed_args):
resource_types = heat_client.resource_types.list(
filters=heat_utils.format_parameters(parsed_args.filter),
with_description=parsed_args.long
)
if parsed_args.long:
columns = ['Resource Type', 'Description']
rows = sorted([r.resource_type, r.description] for r in resource_types)
else:
columns = ['Resource Type']
rows = sorted([r.resource_type] for r in resource_types)
return columns, rows
caot/intellij-community | python/testData/inspections/PyTypeCheckerInspection/MetaClassIteration.py | Python | apache-2.0 | 340 | 0.014706 |
class M1(type):
def __iter__(self):
pass
class M2(type):
pass
class C1(object):
__metaclass__ = M1
class C2(object):
__metaclass__ = M2
class B1(C1):
pass
for x in C1:
pass
for y in <warning descr="Expected 'collections.Iterable', got 'C2' instead">C2</warning>:
pass
for z in B1:
pass
cpodlesny/lisbon | src/contacts/views.py | Python | mit | 6,745 | 0.000593 |
from django.contrib import messages
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.shortcuts import render, redirect, get_object_or_404
from django.utils.translation import ugettext_lazy as _
from helpers.models import Helpers
from offer.models import OfferCategory
from tours.models import Category
from .forms import ContactForm
from .models import Contact, ContactHelpers
def get_lang(request):
lang = request.LANGUAGE_CODE
return lang
def get_company():
return Helpers.objects.get(id=1).company_name
def contact_list(request):
lang = get_lang(request)
queryset_list = Contact.objects.all()
breadcrumbs = [
{'url': '/', 'name': _('Home')},
{'url': '#', 'name': _('Contacts'), 'active': True}
]
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
paginator = Paginator(queryset_list, ContactHelpers.objects.get(id=1).pagination)
page_request_var = 'page'
page = request.GET.get(page_request_var)
try:
queryset = paginator.page(page)
except PageNotAnInteger:
queryset = paginator.page(1)
except EmptyPage:
queryset = paginator.page(paginator.num_pages)
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
context = {
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'company': get_company(),
'title': _('Contacts'),
'breadcrumbs': breadcrumbs,
'object_list': queryset,
'page_request_var': page_request_var,
}
return render(request, 'partials/contact.html', context)
def contact_detail(request, pk=None):
lang = get_lang(request)
contact = Contact.objects.get(pk=pk)
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
breadcrumbs = [
{'url': '/', 'name': _('Home')},
{'url': '/contacts', 'name': _('Contacts')},
{'url': '#', 'name': contact.first_name + ' ' + contact.last_name, 'active': True}
]
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
context = {
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'company': get_company(),
'title': contact.first_name + ' ' + contact.last_name,
'breadcrumbs': breadcrumbs,
'object': contact,
}
return render(request, 'templates/_contact_details.html', context)
def contact_create(request):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
lang = get_lang(request)
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
if not request.user.is_staff or not request.user.is_superuser:
return redirect('accounts:signup')
else:
form = ContactForm(request.POST or None, request.FILES or None)
breadcrumbs = [
{'url': '/', 'name': _('Home')},
{'url': '/contacts', 'name': _('Contacts')},
{'url': '#', 'name': _('Create Contact'), 'active': True}
]
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
messages.success(request, _('Contact Created'))
return redirect('contact:list')
context = {
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'company': get_company(),
'title': _('Create Contact'),
'breadcrumbs': breadcrumbs,
'value': _('Add'),
'form': form
}
return render(request, 'templates/_form.html', context)
def contact_update(request, pk=None):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
lang = get_lang(request)
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
if not request.user.is_staff or not request.user.is_superuser:
return redirect('accounts:signup')
else:
contact = get_object_or_404(Contact, pk=pk)
breadcrumbs = [
{'url': '/', 'name': _('Home')},
{'url': '/contacts', 'name': _('Contacts')},
{'url': '#', 'name': contact.first_name + ' ' + contact.last_name, 'active': True}
]
form = ContactForm(request.POST or None, request.FILES or None, instance=contact)
if form.is_valid():
contact = form.save(commit=False)
contact.save()
messages.success(request, _('Contact saved'))
return redirect('contact:list')
context = {
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'company': get_company(),
'title': _('Contact Edit'),
'breadcrumbs': breadcrumbs,
'instance': contact,
'form': form,
'value': _('Add'),
}
return render(request, 'templates/_form.html', context)
def contact_delete(request, pk=None):
if not request.user.is_staff or not request.user.is_superuser:
return redirect('accounts:signup')
instance = get_object_or_404(Contact, pk=pk)
instance.delete()
messages.success(request, _('Contact deleted'))
return redirect('contact:list')
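Each view above queries Helpers.objects.get(id=1) up to four times per request. A small helper (hypothetical, not in the original file) would fetch the row once and build the footer dict the views all share:

```python
def build_footer(lang):
    # One query instead of four per request.
    helpers = Helpers.objects.get(id=1)
    about = {
        'pt': helpers.about_footer_PT,
        'en': helpers.about_footer_EN,
        'de': helpers.about_footer_DE,
    }[lang]
    return {'about': about, 'icon': helpers.footer_icon}
```

Each view body would then start with `footer = build_footer(get_lang(request))`, cutting the repeated lookups to one.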
datamade/nyc-councilmatic | nyc/models.py | Python | mit | 4,078 | 0.002946 |
from django.conf import settings
from councilmatic_core.models import Bill, Organization, Action
from datetime import datetime
import pytz
app_timezone = pytz.timezone(settings.TIME_ZONE)
class NYCBill(Bill):
class Meta:
proxy = True
def __str__(self):
return self.friendly_name
# NYC CUSTOMIZATION
# the date that a bill was passed, if it has been passed
@property
def date_passed(self):
return self.actions.filter(classification='executive-signature').order_by('-order').first().date if self.actions.all() else None
# NYC CUSTOMIZATION
# makes a friendly name using bill type & number, e.g. 'Introduction 643-2015'
# b/c this is how NYC peeps most often refer to a bill
# this is what is used as the title (heading) for bills throughout the site (bill listing, bill detail)
@property
def friendly_name(self):
nums_only = self.identifier.split(' ')[-1]
return self.bill_type+' '+nums_only
# NYC CUSTOMIZATION
# this is b/c we don't have data on bills voted against, only bills passed -
# everything else is just left to die silently ¯\_(ツ)_/¯
# turns out that ~80% of nyc bills that get passed, are passed within
# 2 months of the last action
# using 6 months instead of 2 months for cutoff, to minimize incorrectly labeling
# in-progress legislation as stale
def _is_stale(self, last_action_date):
# stale = no action in the last 150 days (~5 months)
if last_action_date:
timediff = datetime.now().replace(tzinfo=app_timezone) - last_action_date
return (timediff.days > 150)
else:
return True
# NYC CUSTOMIZATION
# whether or not a bill has reached its final 'completed' status
# what the final status is depends on bill type
def _terminal_status(self, history, bill_type):
if history:
if bill_type == 'Introduction':
if 'executive-signature' in history:
return 'Passed'
else:
return False
elif bill_type in ['Resolution', 'Land Use Application', 'Communication', "Mayor's Message", 'Land Use Call-Up']:
if 'passage' in history:
return 'Approved'
else:
return False
else:
return False
# NYC CUSTOMIZATION
# whether or not something has an approval among any of this actions
# planning on using this for a progress bar for bills to lay out all the steps to law & how far it has gotten
# (e.g. introduced -> approved by committee -> approved by council -> approved by mayor)
@property
def _is_approved(self):
if self.actions:
return any(['Approved' in a.description for a in self.actions.all()])
else:
return False
# NYC CUSTOMIZATION
# the 'current status' of a bill, inferred with some custom logic
# this is used in the colored label in bill listings
@property
def inferred_status(self):
actions = self.actions.all().order_by('-order')
classification_hist = [a.classification for a in actions]
last_action_date = actions[0].date if actions else None
bill_type = self.bill_type
# these are the bill types for which a status doesn't make sense
if bill_type in ['SLR', 'Petition', 'Local Laws 2015']:
return None
elif self._terminal_status(classification_hist, bill_type):
return self._terminal_status(classification_hist, bill_type)
elif self._is_stale(last_action_date):
return 'Inactive'
else:
return 'Active'
# NYC CUSTOMIZATION
# this is used for the text description of a bill in bill listings
# the abstract is usually friendlier, so we want to use that whenever it's available,
# & have the description as a fallback
def listing_description(self):
if self.abstract:
return self.abstract
else:
return self.description
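A self-contained check of the staleness cutoff `_is_stale` applies (pure datetime arithmetic; 'US/Eastern' stands in here for settings.TIME_ZONE, which the record does not show):

```python
from datetime import datetime, timedelta
import pytz

tz = pytz.timezone('US/Eastern')
# A bill whose last action was 200 days ago trips the 150-day cutoff.
last_action_date = datetime.now(tz) - timedelta(days=200)
timediff = datetime.now().replace(tzinfo=tz) - last_action_date
print(timediff.days > 150)   # True -> inferred_status reports 'Inactive'
```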
ruxkor/pulp-or | setup.py | Python | mit | 1,930 | 0.013472 |
#!/usr/bin/env python
"""
Setup script for PuLP added by Stuart Mitchell 2007
Copyright 2007 Stuart Mitchell
"""
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
Description = open('README').read()
License = open('LICENSE').read()
Version = open('VERSION').read().strip()
setup(name="PuLP",
version=Version,
description="""
PuLP is an LP modeler written in python. PuLP can generate MPS or LP files
and call GLPK, COIN CLP/CBC, CPLEX, and GUROBI to solve linear
problems.
""",
long_description = Description,
license = License,
keywords = ["Optimization", "Linear Programming", "Operations Research"],
author="J.S. Roy and S.A. Mitchell",
author_email="s.mitchell@auckland.ac.nz",
url="http://pulp-or.googlecode.com/",
classifiers = ['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Mathematics',
],
#ext_modules = [pulpCOIN],
package_dir={'':'src'},
packages = ['pulp', 'pulp.solverdir'],
package_data = {'pulp' : ["AUTHORS","LICENSE",
"pulp.cfg.linux",
"pulp.cfg.win",
"LICENSE.CoinMP.txt",
"AUTHORS.CoinMP.txt",
"README.CoinMP.txt",
],
'pulp.solverdir' : ['*','*.*']},
install_requires = ['pyparsing>=1.5.2'],
entry_points = ("""
[console_scripts]
pulptest = pulp:pulpTestAll
pulpdoctest = pulp:pulpDoctest
"""
),
)
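The console scripts declared in entry_points map to plain callables, so with PuLP installed the equivalent direct calls are:

```python
import pulp

pulp.pulpTestAll()   # what the `pulptest` script runs
pulp.pulpDoctest()   # what the `pulpdoctest` script runs
```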
synapse-wireless/bulk-reprogramming | snappyImages/synapse/sysInfo.py | Python | apache-2.0 | 4,335 | 0.026298 |
# (c) Copyright 2008-2015 Synapse Wireless, Inc.
"""System Info IDs - used in 'getInfo()' and 'getStat()' calls"""
# Types
SI_TYPE_VENDOR = 0
SI_TYPE_RADIO = 1
SI_TYPE_CPU = 2
SI_TYPE_PLATFORM = 3
SI_TYPE_BUILD = 4
SI_TYPE_VERSION_MAJOR = 5
SI_TYPE_VERSION_MINOR = 6
SI_TYPE_VERSION_BUILD = 7
SI_ENCRYPTION_INFO = 8
# SNAP 2.4 Additions
SI_RPC_PACKET_SENT_ID = 9
SI_RPC_IS_MULTICAST_ID = 10
SI_RPC_IS_MULTICAST = SI_RPC_IS_MULTICAST_ID # (just an alias)
SI_MULTI_PKT_TTL_ID = 11
SI_MULTI_PKT_TTL = SI_MULTI_PKT_TTL_ID # (just an alias)
SI_SMALL_STRS_REMAINING = 12 # Embedded nodes only
SI_MEDIUM_STRS_REMAINING = 13 # Embedded nodes only
SI_ROUTE_TABLE_SIZE = 14
SI_ROUTES_IN_TABLE = 15
SI_BANK_FREE_SPACE = 16 # Embedded nodes only
# SNAP 2.5 Additions
SI_RF200A_FLAG = 17 # Embedded nodes only
SI_STDIN_HOOK_STATUS = 18 # Embedded nodes only
# SNAP 2.6 Additions
SI_TINY_STRS_REMAINING = 19 # Embedded nodes only
SI_LARGE_STRS_REMAINING = 20 # Embedded nodes only
SI_SCRIPT_FIRST_RUN_STATUS = 21 # Embedded nodes only
SI_SCRIPT_BASE_ADDR = 22 # Embedded nodes only
SI_SCRIPT_BASE_BANK = 23 # Embedded nodes only
SI_RPC_IS_DIRECTED_MULTICAST = 24
SI_DELAY_FACTOR = 25 # Directed Multicast only
SI_ADDRESS_INDEX = 26 # Directed Multicast only
SI_MULTI_PKT_GROUP = 27 # Multicast or Directed Multicast only
SI_MULTI_PKT_ORIGINAL_TTL = 28 # Directed Multicast only
# Vendors
SI_VENDOR_SYNAPSE = 0
SI_VENDOR_FREESCALE = 2 # value = 1 skipped
SI_VENDOR_CEL = 3
SI_VENDOR_ATMEL = 4
SI_VENDOR_SILICON_LABS = 5
# Radios
SI_RADIO_802_15_4 = 0
SI_RADIO_NONE = 1
SI_RADIO_900 = 2
# CPUs
SI_CPU_MC9S08GT60A = 0
SI_CPU_8051 = 1
SI_CPU_MC9S08QE = 2
SI_CPU_COLDFIRE = 3
SI_CPU_ARM7 = 4
SI_CPU_ATMEGA = 5
SI_CPU_SI1000 = 6
SI_CPU_X86 = 7
SI_CPU_UNKNOWN = 8
SI_CPU_SPARC_LEON = 9
SI_CPU_ARM_CORTEX_M3 = 10
SI_CPU_ATMEGA128RFR2 = 12 # Deprecated
SI_CPU_ATMEGA1284RFR2 = 13 # Deprecated
# Platforms
SI_PLATFORM_RF_ENGINE = 0
SI_PLATFORM_CEL_ZIC2410 = 3 # values [1, 2, 4] skipped
SI_PLATFORM_MC1321X = 5
SI_PLATFORM_ATMEGA128RFA1 = 6
SI_PLATFORM_SNAPCOM = 7
SI_PLATFORM_SI1000 = 8
SI_PLATFORM_MC1322X = 9
SI100X_FHSS = 11 # value [10, 12] skipped
SI_PLATFORM_SI100X_KADEX = 11
SI_PLATFORM_RF300 = 13
SI_PLATFORM_RF200_PFM = 14
SI_PLATFORM_SM300 = 15
SI_PLATFORM_SM301 = 16
SI_PLATFORM_SM200_PFM = 17
SI_PLATFORM_RN_G2C547 = 18
SI_PLATFORM_RF266_PFM = 19
SI_PLATFORM_STM32W108xB = 20
SI_PLATFORM_SM222_PFM = 25 # value [21, 22, 23, 24] skipped
SI_PLATFORM_ATmega128RFR2_PFM = 26
SI_PLATFORM_SM220UF1_PFM = 27
SI_PLATFORM_ATmega1284RFR2_PFM = 28
# Builds
SI_BUILD_DEBUG = 0
SI_BUILD_RELEASE = 1
# Encryptions
SI_NO_ENCRYPTION = 0
SI_AES128_ENCRYPTION = 1
SI_SNAP_ENCRYPTION = 2
# getStat() Enumerations
STAT_DS_NULL_TX_BUFFERS = 0
STAT_DS_UART0_RX_BUFFERS = 1
STAT_DS_UART0_TX_BUFFERS = 2
STAT_DS_UART1_RX_BUFFERS = 3
STAT_DS_UART_TX_BUFFERS = 4
STAT_DS_TRANSPARENT_RX_BUFFERS = 5
STAT_DS_TRANSPARENT_TX_BUFFERS = 6
STAT_DS_PACKET_SERIAL_RX_BUFFERS = 7
STAT_DS_PACKET_SERIAL_TX_BUFFERS = 8
STAT_DS_RADIO_RX_BUFFERS = 9
STAT_DS_RADIO_TX_BUFFERS = 10
STAT_RADIO_FORWARDED_UNICASTS = 11
STAT_PACKET_SERIAL_FORWARDED_UNICASTS = 12
STAT_RADIO_FORWARDED_XCASTS = 13
STAT_PACKET_SERIAL_FORWARDED_XCASTS = 14
STAT_PACKET_SERIAL_RETRIES = 15 # Debug Builds Only
STAT_PACKET_SERIAL_FAILURES = 16 # Debug Builds Only
STAT_PACKET_SERIAL_RX_ERRORS = 17 # Debug Builds Only
STAT_PACKET_SERIAL_RX_BAD_CKSUM = 18 # Debug Builds Only
STAT_PACKET_SERIAL_NUM_RX_ACKS = 19 # Debug Builds Only
STAT_PACKET_SERIAL_NUM_RX_DUPS = 20 # Debug Builds Only
STAT_PACKET_SERIAL_NO_ROOMS = 21 # Debug Builds Only
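# --- Editor's hedged sketch (not part of the original Synapse module) ---
# Per the module docstring, these IDs key the on-node SNAPpy getInfo() and
# getStat() builtins. Off-node, a small lookup table makes the returned
# enumerations readable; the mapping below is illustrative, not exhaustive.
VENDOR_NAMES = {
    SI_VENDOR_SYNAPSE: "Synapse",
    SI_VENDOR_FREESCALE: "Freescale",
    SI_VENDOR_CEL: "CEL",
    SI_VENDOR_ATMEL: "Atmel",
    SI_VENDOR_SILICON_LABS: "Silicon Labs",
}
def vendor_name(vendor_id):
    """Render a getInfo(SI_TYPE_VENDOR) result; unknown ids fall through."""
    return VENDOR_NAMES.get(vendor_id, "unknown (%d)" % vendor_id)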
|
BizarroSolutions/mcg
|
cli/init.py
|
Python
|
gpl-3.0
| 1,266
| 0.00079
|
from cement.utils.misc import init_defaults
from configparser import NoSectionError
from cli.Mcg import Mcg
from core.db.MongoDB import MongoDB
def main():
# Logging config (mcg section in file mcg.conf)
defaults = init_defaults('mcg', 'log.logging')
defaults['log.logging']['file'] = 'mcg.log'
with Mcg('mcg', config_defaults=defaults) as app:
# First setup the application
app.setup()
# Parse the configuration file
# app.config.parse_file('/etc/mcg/mcg.conf')
# app.config.parse_file('C:/mcg/mcg.conf')
try:
MongoDB(
{
"user": app.config.get('mongodb', 'user'),
"password": app.config.get('mongodb', 'password'),
"host": app.config.get('mongodb', 'host'),
"port": app.config.get('mongodb', 'port'),
"db": app.
|
config.ge
|
t('mongodb', 'db')
}
)
except NoSectionError:
print("Configuration File Not Found or [mongodb] Section Not Found")
print("Create the file /etc/mcg/mcg.conf for Linux Systems")
print("Create the file C:/mcg/mcg.conf for Windows Systems")
app.run()
app.close()
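# Editor's note (hedged): the NoSectionError handler above implies mcg.conf is
# an INI-style file with a [mongodb] section; a minimal sketch with made-up
# values:
#   [mongodb]
#   user = mcg
#   password = secret
#   host = localhost
#   port = 27017
#   db = mcg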
|
zasdfgbnm/qutip
|
qutip/tests/test_metrics.py
|
Python
|
bsd-3-clause
| 5,002
| 0.0002
|
# -*- coding: utf-8 -*-
"""
Simple tests for metrics and pseudometrics implemented in
the qutip.metrics module.
"""
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
from numpy import abs, sqrt
from numpy.testing import assert_, run_module_suite
import scipy
from qutip.operators import create, destroy, jmat, identity, qdiags
from qutip.states import fock_dm
from qutip.propagator import propagator
from qutip.random_objects import rand_herm, rand_dm, rand_unitary, rand_ket
from qutip.metrics import *
"""
A test class for the metrics and pseudo-metrics included with QuTiP.
"""
def test_fid_trdist_limits():
"""
Metrics: Fidelity / trace distance limiting cases
"""
rho = rand_dm(25, 0.25)
assert_(abs(fidelity(rho, rho)-1) < 1e-6)
assert_(tracedist(rho, rho) < 1e-6)
rho1 = fock_dm(5, 1)
rho2 = fock_dm(5, 2)
assert_(fidelity(rho1, rho2) < 1e-6)
assert_(abs(tracedist(rho1, rho2)-1) < 1e-6)
def test_fidelity1():
"""
Metrics: Fidelity, mixed state inequality
"""
for k in range(10):
rho1 = rand_dm(25, 0.25)
rho2 = rand_dm(25, 0.25)
        F = fidelity(rho1, rho2)
assert_(1-F <= sqrt(1-F**2))
def test_fidelity2():
"""
Metrics: Fidelity, invariance under unitary trans.
"""
for k in range(10):
rho1 = rand_dm(25, 0.25)
rho2 = rand_dm(25, 0.25)
U = rand_unitary(25, 0.25)
F = fidelity(rho1, rho2)
FU = fidelity(U*rho1*U.dag(), U*rho2*U.dag())
assert_(abs((F-FU)/F) < 1e-5)
def test_tracedist1():
"""
Metrics: Trace dist., invariance under unitary trans.
"""
for k in range(10):
rho1 = rand_dm(25, 0.25)
rho2 = rand_dm(25, 0.25)
U = rand_unitary(25, 0.25)
D = tracedist(rho1, rho2)
DU = tracedist(U*rho1*U.dag(), U*rho2*U.dag())
assert_(abs((D-DU)/D) < 1e-5)
def test_tracedist2():
"""
Metrics: Trace dist. & Fidelity mixed/mixed inequality
"""
for k in range(10):
rho1 = rand_dm(25, 0.25)
rho2 = rand_dm(25, 0.25)
F = fidelity(rho1, rho2)
D = tracedist(rho1, rho2)
assert_(1-F <= D)
def test_tracedist3():
"""
Metrics: Trace dist. & Fidelity mixed/pure inequality
"""
for k in range(10):
ket = rand_ket(25, 0.25)
rho1 = ket*ket.dag()
rho2 = rand_dm(25, 0.25)
F = fidelity(rho1, rho2)
D = tracedist(rho1, rho2)
assert_(1-F**2 <= D)
def rand_super():
h_5 = rand_herm(5)
return propagator(h_5, scipy.rand(), [
create(5), destroy(5), jmat(2, 'z')
])
def test_average_gate_fidelity():
"""
Metrics: Check avg gate fidelities for random
maps (equal to 1 for id maps).
"""
for dims in range(2, 5):
assert_(abs(average_gate_fidelity(identity(dims)) - 1) <= 1e-12)
assert_(0 <= average_gate_fidelity(rand_super()) <= 1)
def test_hilbert_dist():
"""
Metrics: Hilbert distance.
"""
diag1 = np.array([0.5, 0.5, 0, 0])
diag2 = np.array([0, 0, 0.5, 0.5])
r1 = qdiags(diag1, 0)
r2 = qdiags(diag2, 0)
assert_(abs(hilbert_dist(r1, r2)-1) <= 1e-6)
if __name__ == "__main__":
run_module_suite()
|
qsnake/werkzeug
|
examples/plnt/utils.py
|
Python
|
bsd-3-clause
| 3,618
| 0.000276
|
# -*- coding: utf-8 -*-
"""
plnt.utils
~~~~~~~~~~
The planet utilities.
:copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import re
from os import path
from jinja2 import Environment, FileSystemLoader
from werkzeug import Response, Local, LocalManager, url_encode, \
url_quote, cached_property
from werkzeug.routing import Map, Rule
# context locals. these two objects are use by the application to
# bind objects to the current context. A context is defined as the
# current thread and the current greenlet if there is greenlet support.
# the `get_request` and `get_application` functions look up the request
# and application objects from this local manager.
local = Local()
local_manager = LocalManager([local])
# proxy objects
request = local('request')
application = local('application')
url_adapter = local('url_adapter')
# let's use jinja for templates this time
template_path = path.join(path.dirname(__file__), 'templates')
jinja_env = Environment(loader=FileSystemLoader(template_path))
# the collected url patterns
url_map = Map([Rule('/shared/<path:file>', endpoint='shared')])
endpoints = {}
_par_re = re.compile(r'\n{2,}')
_entity_re = re.compile(r'&([^;]+);')
_striptags_re = re.compile(r'(<!--.*-->|<[^>]*>)')
from htmlentitydefs import name2codepoint
html_entities = name2codepoint.copy()
html_entities['apos'] = 39
del name2codepoint
def expose(url_rule, endpoint=None, **kwargs):
"""Expose this function to the web layer."""
def decorate(f):
e = endpoint or f.__name__
endpoints[e] = f
url_map.add(Rule(url_rule, endpoint=e, **kwargs))
return f
return decorate
def render_template(template_name, **context):
"""Render a template into a response."""
tmpl = jinja_env.get_template(template_name)
context['url_for'] = url_for
return Response(tmpl.render(context), mimetype='text/html')
def nl2p(s):
"""Add paragraphs to a text."""
return u'\n'.join(u'<p>%s</p>' % p for p in _par_re.split(s))
def url_for(endpoint, **kw):
"""Simple function for URL generation."""
return url_adapter.build(endpoint, kw)
def strip_tags(s):
"""Resolve HTML entities and remove tags from a string."""
def handle_match(m):
name = m.group(1)
if name in html_entities:
return unichr(html_entities[name])
if name[:2] in ('#x', '#X'):
try:
return unichr(int(name[2:], 16))
except ValueError:
return u''
elif name.startswith('#'):
try:
return unichr(int(name[1:]))
except ValueError:
return u''
return u''
    return _entity_re.sub(handle_match, _striptags_re.sub('', s))
class Pagination(object):
"""
Paginate a SQLAlchemy query object.
"""
def __init__(self, query, per_page, page, endpoint):
self.query = query
self.per_page = per_page
        self.page = page
self.endpoint = endpoint
@cached_property
def entries(self):
return self.query.offset((self.page - 1) * self.per_page) \
.limit(self.per_page).all()
@cached_property
def count(self):
return self.query.count()
has_previous = property(lambda x: x.page > 1)
has_next = property(lambda x: x.page < x.pages)
previous = property(lambda x: url_for(x.endpoint, page=x.page - 1))
next = property(lambda x: url_for(x.endpoint, page=x.page + 1))
pages = property(lambda x: max(0, x.count - 1) // x.per_page + 1)
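# Editor's hedged usage sketch: given some SQLAlchemy query, e.g.
#   page = Pagination(Entry.query, per_page=20, page=2, endpoint='index')
# page.entries lazily yields rows 21-40, while page.previous / page.next build
# the neighbouring page URLs through url_for.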
|
arjitc/librenms
|
LibreNMS/service.py
|
Python
|
gpl-3.0
| 24,575
| 0.004557
|
import LibreNMS
import json
import logging
import os
import pymysql
import subprocess
import threading
import sys
import time
from datetime import timedelta
from datetime import datetime
from logging import debug, info, warning, error, critical, exception
from platform import python_version
from time import sleep
from socket import gethostname
from signal import signal, SIGTERM
from uuid import uuid1
class ServiceConfig:
def __init__(self):
"""
Stores all of the configuration variables for the LibreNMS service in a common object
        Starts with defaults, but can be populated with variables from config.php by calling populate()
"""
self._uuid = str(uuid1())
self.set_name(gethostname())
def set_name(self, name):
if name:
self.name = name.strip()
self.unique_name = "{}-{}".format(self.name, self._uuid)
class PollerConfig:
def __init__(self, workers, frequency, calculate=None):
self.enabled = True
self.workers = workers
self.frequency = frequency
        self.calculate = calculate
# config variables with defaults
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
node_id = None
name = None
unique_name = None
single_instance = True
distributed = False
group = 0
debug = False
log_level = 20
max_db_failures = 5
alerting = PollerConfig(1, 60)
poller = PollerConfig(24, 300)
services = PollerConfig(8, 300)
discovery = PollerConfig(16, 21600)
billing = PollerConfig(2, 300, 60)
ping = PollerConfig(1, 120)
down_retry = 60
update_enabled = True
update_frequency = 86400
master_resolution = 1
master_timeout = 10
redis_host = 'localhost'
redis_port = 6379
redis_db = 0
redis_pass = None
redis_socket = None
redis_sentinel = None
redis_sentinel_service = None
redis_timeout = 60
db_host = 'localhost'
db_port = 0
db_socket = None
db_user = 'librenms'
db_pass = ''
db_name = 'librenms'
watchdog_enabled = False
watchdog_logfile = 'logs/librenms.log'
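    # Editor's note (hedged): populate() below layers three sources, with
    # environment variables beating config.php values, which beat the class
    # defaults above (e.g. DB_HOST env var > config 'db_host' > db_host).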
def populate(self):
config = self._get_config_data()
# populate config variables
self.node_id = os.getenv('NODE_ID')
self.set_name(config.get('distributed_poller_name', None))
self.distributed = config.get('distributed_poller', ServiceConfig.distributed)
self.group = ServiceConfig.parse_group(config.get('distributed_poller_group', ServiceConfig.group))
# backward compatible options
self.poller.workers = config.get('poller_service_workers', ServiceConfig.poller.workers)
self.poller.frequency = config.get('poller_service_poll_frequency', ServiceConfig.poller.frequency)
self.discovery.frequency = config.get('poller_service_discover_frequency', ServiceConfig.discovery.frequency)
self.down_retry = config.get('poller_service_down_retry', ServiceConfig.down_retry)
self.log_level = config.get('poller_service_loglevel', ServiceConfig.log_level)
# new options
self.poller.enabled = config.get('service_poller_enabled', True) # unused
self.poller.workers = config.get('service_poller_workers', ServiceConfig.poller.workers)
self.poller.frequency = config.get('service_poller_frequency', ServiceConfig.poller.frequency)
self.discovery.enabled = config.get('service_discovery_enabled', True) # unused
self.discovery.workers = config.get('service_discovery_workers', ServiceConfig.discovery.workers)
self.discovery.frequency = config.get('service_discovery_frequency', ServiceConfig.discovery.frequency)
self.services.enabled = config.get('service_services_enabled', True)
self.services.workers = config.get('service_services_workers', ServiceConfig.services.workers)
self.services.frequency = config.get('service_services_frequency', ServiceConfig.services.frequency)
self.billing.enabled = config.get('service_billing_enabled', True)
self.billing.frequency = config.get('service_billing_frequency', ServiceConfig.billing.frequency)
self.billing.calculate = config.get('service_billing_calculate_frequency', ServiceConfig.billing.calculate)
self.alerting.enabled = config.get('service_alerting_enabled', True)
self.alerting.frequency = config.get('service_alerting_frequency', ServiceConfig.alerting.frequency)
self.ping.enabled = config.get('service_ping_enabled', False)
        self.ping.frequency = config.get('ping_rrd_step', ServiceConfig.ping.frequency)
self.down_retry = config.get('service_poller_down_retry', ServiceConfig.down_retry)
self.log_level = config.get('service_loglevel', ServiceConfig.log_level)
self.update_enabled = config.get('service_update_enabled', ServiceConfig.update_enabled)
self.update_frequency = config.get('service_update_frequency', ServiceConfig.update_frequency)
self.redis_host = os.getenv('REDIS_HOST', config.get('redis_host', ServiceConfig.redis_host))
self.redis_db = os.getenv('REDIS_DB', config.get('redis_db', ServiceConfig.redis_db))
self.redis_pass = os.getenv('REDIS_PASSWORD', config.get('redis_pass', ServiceConfig.redis_pass))
self.redis_port = int(os.getenv('REDIS_PORT', config.get('redis_port', ServiceConfig.redis_port)))
self.redis_socket = os.getenv('REDIS_SOCKET', config.get('redis_socket', ServiceConfig.redis_socket))
self.redis_sentinel = os.getenv('REDIS_SENTINEL', config.get('redis_sentinel', ServiceConfig.redis_sentinel))
self.redis_sentinel_service = os.getenv('REDIS_SENTINEL_SERVICE',
config.get('redis_sentinel_service',
ServiceConfig.redis_sentinel_service))
self.redis_timeout = os.getenv('REDIS_TIMEOUT', self.alerting.frequency if self.alerting.frequency != 0 else self.redis_timeout)
self.db_host = os.getenv('DB_HOST', config.get('db_host', ServiceConfig.db_host))
self.db_name = os.getenv('DB_DATABASE', config.get('db_name', ServiceConfig.db_name))
self.db_pass = os.getenv('DB_PASSWORD', config.get('db_pass', ServiceConfig.db_pass))
self.db_port = int(os.getenv('DB_PORT', config.get('db_port', ServiceConfig.db_port)))
self.db_socket = os.getenv('DB_SOCKET', config.get('db_socket', ServiceConfig.db_socket))
self.db_user = os.getenv('DB_USERNAME', config.get('db_user', ServiceConfig.db_user))
self.watchdog_enabled = config.get('service_watchdog_enabled', ServiceConfig.watchdog_enabled)
self.watchdog_logfile = config.get('log_file', ServiceConfig.watchdog_logfile)
# set convenient debug variable
self.debug = logging.getLogger().isEnabledFor(logging.DEBUG)
if not self.debug and self.log_level:
try:
logging.getLogger().setLevel(self.log_level)
except ValueError:
error("Unknown log level {}, must be one of 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'".format(self.log_level))
logging.getLogger().setLevel(logging.INFO)
def _get_config_data(self):
try:
import dotenv
env_path = "{}/.env".format(self.BASE_DIR)
info("Attempting to load .env from '%s'", env_path)
dotenv.load_dotenv(dotenv_path=env_path, verbose=True)
if not os.getenv('NODE_ID'):
raise ImportError(".env does not contain a valid NODE_ID setting.")
except ImportError as e:
exception("Could not import .env - check that the poller user can read the file, and that composer install has been run recently")
sys.exit(3)
config_cmd = ['/usr/bin/env', 'php', '{}/config_to_json.php'.format(self.BASE_DIR), '2>&1']
try:
return json.loads(subprocess.check_output(config_cmd).decode())
except subprocess.CalledProcessError as e:
error("ERROR: Coul
|
nlgcoin/guldencoin-official
|
test/functional/feature_minchainwork.py
|
Python
|
mit
| 4,129
| 0.003875
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import connect_nodes, assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(GuldenTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101]
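        # Editor's note: 0x65 == 101 units of work; at REGTEST_WORK_PER_BLOCK
        # == 2, the chain (genesis included) first exceeds that after 50 mined
        # blocks, which is why run_test() generates 49 and then one more.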
def setup_network(self):
# This test relies on the chain setup being:
# node0 <- node1 <- node2
# Before leaving IBD, nodes prefer to download blocks from outbound
        # peers, so ensure that we're mining on an outbound peer and testing
# block relay to inbound peers.
self.setup_nodes()
for i in range(self.num_nodes-1):
connect_nodes(self.nodes[i+1], i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generatetoaddress(num_blocks_to_generate,
self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
###assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
###assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
###assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
MinimumChainWorkTest().main()
|
kornai/4lang
|
exp/corp/xsum.py
|
Python
|
mit
| 63
| 0
|
import sys
print(sum(int(line.strip()) for line in sys.stdin))
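# Editor's usage note: reads one integer per line from stdin and prints the
# total, e.g. `printf '1\n2\n3\n' | python xsum.py` outputs 6.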
|
ecolell/aquire
|
version.py
|
Python
|
mit
| 86
| 0.023256
|
# Do not edit this file, pipeline versioning is governed by git tags
__version__ = "0.0.0"
|
cread/ec2id
|
cherrypy/test/test_proxy.py
|
Python
|
apache-2.0
| 4,957
| 0.006052
|
from cherrypy.test import test
test.prefer_parent_path()
import cherrypy
script_names = ["", "/path/to/myapp"]
def setup_server():
# Set up site
cherrypy.config.update({
'environment': 'test_suite',
'tools.proxy.on': True,
'tools.proxy.base': 'www.mydomain.test',
})
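    # Editor's note (hedged): with tools.proxy.base set site-wide, generated
    # URLs and redirects point at www.mydomain.test unless a forwarded-host
    # style header overrides it, which the assertions below exercise.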
# Set up application
class Root:
def __init__(self, sn):
# Calculate a URL outside of any requests.
self.thisnewpage = cherrypy.url("/this/new/page", script_name=sn)
def pageurl(self):
return self.thisnewpage
pageurl.exposed = True
        def index(self):
raise cherrypy.HTTPRedirect('dummy')
index.exposed = True
def remoteip(self):
return cherrypy.request.remote.ip
remoteip.exposed = True
def xhost(self):
raise cherrypy.HTTPRedirect('blah')
xhost.exposed = True
xhost._cp_config = {'tools.proxy.local': 'X-Host',
'tools.trailing_slash.extra': True,
}
def base(self):
return cherrypy.request.base
base.exposed = True
def ssl(self):
return cherrypy.request.base
ssl.exposed = True
ssl._cp_config = {'tools.proxy.scheme': 'X-Forwarded-Ssl'}
def newurl(self):
return ("Browse to <a href='%s'>this page</a>."
% cherrypy.url("/this/new/page"))
newurl.exposed = True
for sn in script_names:
cherrypy.tree.mount(Root(sn), sn)
from cherrypy.test import helper
class ProxyTest(helper.CPWebCase):
def testProxy(self):
self.getPage("/")
self.assertHeader('Location',
"%s://www.mydomain.test%s/dummy" %
(self.scheme, self.prefix()))
# Test X-Forwarded-Host (Apache 1.3.33+ and Apache 2)
self.getPage("/", headers=[('X-Forwarded-Host', 'http://www.example.test')])
self.assertHeader('Location', "http://www.example.test/dummy")
self.getPage("/", headers=[('X-Forwarded-Host', 'www.example.test')])
self.assertHeader('Location', "%s://www.example.test/dummy" % self.scheme)
# Test X-Forwarded-For (Apache2)
self.getPage("/remoteip",
headers=[('X-Forwarded-For', '192.168.0.20')])
self.assertBody("192.168.0.20")
self.getPage("/remoteip",
headers=[('X-Forwarded-For', '67.15.36.43, 192.168.0.20')])
self.assertBody("192.168.0.20")
# Test X-Host (lighttpd; see https://trac.lighttpd.net/trac/ticket/418)
self.getPage("/xhost", headers=[('X-Host', 'www.example.test')])
self.assertHeader('Location', "%s://www.example.test/blah" % self.scheme)
# Test X-Forwarded-Proto (lighttpd)
self.getPage("/base", headers=[('X-Forwarded-Proto', 'https')])
self.assertBody("https://www.mydomain.test")
# Test X-Forwarded-Ssl (webfaction?)
self.getPage("/ssl", headers=[('X-Forwarded-Ssl', 'on')])
self.assertBody("https://www.mydomain.test")
# Test cherrypy.url()
for sn in script_names:
# Test the value inside requests
self.getPage(sn + "/newurl")
self.assertBody("Browse to <a href='%s://www.mydomain.test" % self.scheme
+ sn + "/this/new/page'>this page</a>.")
self.getPage(sn + "/newurl", headers=[('X-Forwarded-Host',
'http://www.example.test')])
self.assertBody("Browse to <a href='http://www.example.test"
+ sn + "/this/new/page'>this page</a>.")
# Test the value outside requests
port = ""
if self.scheme == "http" and self.PORT != 80:
port = ":%s" % self.PORT
elif self.scheme == "https" and self.PORT != 443:
port = ":%s" % self.PORT
host = self.HOST
if host in ('0.0.0.0', '::'):
import socket
host = socket.gethostname()
expected = ("%s://%s%s%s/this/new/page"
% (self.scheme, host, port, sn))
self.getPage(sn + "/pageurl")
self.assertBody(expected)
# Test trailing slash (see http://www.cherrypy.org/ticket/562).
self.getPage("/xhost/", headers=[('X-Host', 'www.example.test')])
self.assertHeader('Location', "%s://www.example.test/xhost"
% self.scheme)
if __name__ == '__main__':
setup_server()
helper.testmain()
|
nburn42/tensorflow
|
tensorflow/python/kernel_tests/identity_op_py_test.py
|
Python
|
apache-2.0
| 2,642
| 0.007949
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for IdentityOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class IdentityOpTest(test.TestCase):
def testInt32_6(self):
with self.test_session():
value = array_ops.identity([1, 2, 3, 4, 5, 6]).eval()
self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), value)
def testInt32_2_3(self):
with self.test_session():
inp = constant_op.constant([10, 20, 30, 40, 50, 60], shape=[2, 3])
value = array_ops.identity(inp).eval()
self.assertAllEqual(np.array([[10, 20, 30], [40, 50, 60]]), value)
def testString(self):
source = [b"A", b"b", b"C", b"d", b"E", b"f"]
with self.test_session():
value = array_ops.identity(source).eval()
self.assertAllEqual(source, value)
def testIdentityShape(self):
with self.test_session():
shape = [2, 3]
array_2x3 = [[1, 2, 3], [6, 5, 4]]
tensor = constant_op.constant(array_2x3)
self.assertEquals(shape, tensor.get_shape())
self.assertEquals(shape, array_ops.identity(tensor).get_shape())
self.assertEquals(shape, array_ops.identity(array_2x3).get_shape())
self.assertEquals(shape,
array_ops.identity(np.array(array_2x3)).get_shape())
def testRefIdentityShape(self):
with self.test_session():
shape = [2, 3]
tensor = variables.Variable(
constant_op.constant(
[[1, 2, 3], [6, 5, 4]], dtype=dtypes.int32))
self.assertEquals(shape, tensor.get_shape())
self.assertEquals(shape, gen_array_ops.ref_identity(tensor).get_shape())
if __name__ == "__main__":
test.main()
|
pivotaccess2007/RapidSMS-Rwanda
|
apps/poll/urls.py
|
Python
|
lgpl-3.0
| 992
| 0.027218
|
import os
import views as pv
from django.conf.urls.defaults import *
urlpatterns = patterns('',
# serve assets via django, during development
(r'^poll/assets/(?P<path>.*)$', "django.views.static.serve",
{"document_root": os.path.dirname(__file__) + "/assets"}),
# graphs are generated and stored to be viewed statically
(r'^poll/graphs/(?P<path>.*)$', "django.views.static.serve",
{"document_root": os.path.dirname(__file__) + "/graphs"}),
# poll views (move to poll/urls.py)
    (r'^poll$', pv.dashboard),
(r'^poll/dashboard$', pv.dashboard),
(r'^poll/dashboard/(?P<id>\d+)$', pv.dashboard),
(r'^poll/questions$', pv.manage_questions),
(r'^poll/question/(?P<id>\d+)$', pv.manage_questions),
(r'^poll/question/(?P<id>\d+)/edit$', pv.edit_question),
    (r'^poll/question/add$', pv.add_question),
(r'^poll/log$', pv.message_log),
# ajax
(r'^poll/moderate/(?P<id>\d+)/(?P<status>win|fail)$', pv.moderate),
(r'^poll/correct/(?P<id>\d+)$', pv.correction),\
)
|
belokop-an/agenda-tools
|
code/htdocs/categoryConfCreationControl.py
|
Python
|
gpl-2.0
| 677
| 0.048744
|
import MaKaC.webinterface.rh.categoryMod as categoryMod
def index(req, **params):
    return categoryMod.RHCategoryConfCreationControl( req ).process( params )
def setCreateConferenceControl( req, **params ):
return categoryMod.RHCategorySetConfControl( req ).process( params )
def selectAllowedToCreateConf( req, **params ):
return categoryMod.RHCategorySelectConfCreators( req ).process( params )
def addAllowedToCreateConferences( req, **params ):
return categoryMod.RHCategoryAddConfCreators( req ).process( params )
def removeAllowedToCreateConferences( req, **params ):
return categoryMod.RHCategoryRemoveConfCreators( req ).process( params )
|
kayak/pypika
|
pypika/tests/dialects/test_postgresql.py
|
Python
|
apache-2.0
| 9,532
| 0.003462
|
import unittest
from collections import OrderedDict
from pypika import (
Array,
Field,
JSON,
QueryException,
Table,
)
from pypika.dialects import PostgreSQLQuery
class InsertTests(unittest.TestCase):
table_abc = Table("abc")
def test_array_keyword(self):
q = PostgreSQLQuery.into(self.table_abc).insert(1, [1, "a", True])
self.assertEqual("INSERT INTO \"abc\" VALUES (1,ARRAY[1,'a',true])", str(q))
class JSONObjectTests(unittest.TestCase):
def test_alias_set_correctly(self):
table = Table('jsonb_table')
q = PostgreSQLQuery.from_('abc').select(table.value.get_text_value('a').as_('name'))
self.assertEqual('''SELECT "value"->>'a' "name" FROM "abc"''', str(q))
def test_json_value_from_dict(self):
q = PostgreSQLQuery.select(JSON({"a": "foo"}))
self.assertEqual('SELECT \'{"a":"foo"}\'', str(q))
def test_json_value_from_array_num(self):
q = PostgreSQLQuery.select(JSON([1, 2, 3]))
self.assertEqual("SELECT '[1,2,3]'", str(q))
def test_json_value_from_array_str(self):
q = PostgreSQLQuery.select(JSON(["a", "b", "c"]))
self.assertEqual('SELECT \'["a","b","c"]\'', str(q))
def test_json_value_from_dict_recursive(self):
q = PostgreSQLQuery.select(JSON({"a": "z", "b": {"c": "foo"}, "d": 1}))
# gotta split this one up to avoid the indeterminate order
sql = str(q)
start, end = 9, -2
self.assertEqual("SELECT '{}'", sql[:start] + sql[end:])
members_set = set(sql[start:end].split(","))
self.assertSetEqual({'"a":"z"', '"b":{"c":"foo"}', '"d":1'}, members_set)
class JSONOperatorsTests(unittest.TestCase):
# reference https://www.postgresql.org/docs/9.5/functions-json.html
table_abc = Table("abc")
def test_get_json_value_by_key(self):
q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_json_value("dates"))
self.assertEqual('SELECT * FROM "abc" WHERE "json"->\'dates\'', str(q))
def test_get_json_value_by_index(self):
q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_json_value(1))
self.assertEqual('SELECT * FROM "abc" WHERE "json"->1', str(q))
def test_get_text_value_by_key(self):
q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_text_value("dates"))
self.assertEqual('SELECT * FROM "abc" WHERE "json"->>\'dates\'', str(q))
def test_get_text_value_by_index(self):
q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_text_value(1))
self.assertEqual('SELECT * FROM "abc" WHERE "json"->>1', str(q))
def test_get_path_json_value(self):
q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_path_json_value("{a,b}"))
self.assertEqual('SELECT * FROM "abc" WHERE "json"#>\'{a,b}\'', str(q))
def test_get_path_text_value(self):
q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.get_path_text_value("{a,b}"))
self.assertEqual('SELECT * FROM "abc" WHERE "json"#>>\'{a,b}\'', str(q))
class JSONBOperatorsTests(unittest.TestCase):
# reference https://www.postgresql.org/docs/9.5/functions-json.html
table_abc = Table("abc")
def test_json_contains_for_json(self):
q = PostgreSQLQuery.select(JSON({"a": 1, "b": 2}).contains({"a": 1}))
# gotta split this one up to avoid the indeterminate order
sql = str(q)
start, end = 9, -13
self.assertEqual("SELECT '{}'@>'{\"a\":1}'", sql[:start] + sql[end:])
members_set = set(sql[start:end].split(","))
self.assertSetEqual({'"a":1', '"b":2'}, members_set)
def test_json_contains_for_field(self):
q = (
PostgreSQLQuery.from_(self.table_abc)
.select("*")
.where(self.table_abc.json.contains({"dates": "2018-07-10 - 2018-07-17"}))
)
self.assertEqual(
"SELECT * " 'FROM "abc" ' 'WHERE "json"@>\'{"dates":"2018-07-10 - 2018-07-17"}\'',
str(q),
)
def test_json_contained_by_using_str_arg(self):
q = (
PostgreSQLQuery.from_(self.table_abc)
.select("*")
.where(
self.table_abc.json.contained_by(
OrderedDict(
[
("dates", "2018-07-10 - 2018-07-17"),
("imported", "8"),
]
)
)
)
)
self.assertEqual(
'SELECT * FROM "abc" ' 'WHERE "json"<@\'{"dates":"2018-07-10 - 2018-07-17","imported":"8"}\'',
str(q),
)
def test_json_contained_by_using_list_arg(self):
q = (
PostgreSQLQuery.from_(self.table_abc)
.select("*")
.where(self.table_abc.json.contained_by(["One", "Two", "Three"]))
)
self.assertEqual('SELECT * FROM "abc" WHERE "json"<@\'["One","Two","Three"]\'', str(q))
def test_json_contained_by_with_complex_criterion(self):
q = (
PostgreSQLQuery.from_(self.table_abc)
.select("*")
.where(self.table_abc.json.contained_by(["One", "Two", "Three"]) & (self.table_abc.id == 26))
)
self.assertEqual(
'SELECT * FROM "abc" WHERE "json"<@\'["One","Two","Three"]\' AND "id"=26',
str(q),
)
def test_json_has_key(self):
q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.has_key("dates"))
self.assertEqual('SELECT * FROM "abc" WHERE "json"?\'dates\'', str(q))
def test_json_has_keys(self):
q = PostgreSQLQuery.from_(self.table_abc).select("*").where(self.table_abc.json.has_keys(["dates", "imported"]))
self.assertEqual("SELECT * FROM \"abc\" WHERE \"json\"?&ARRAY['dates','imported']", str(q))
def test_json_has_any_keys(self):
q = (
PostgreSQLQuery.from_(self.table_abc)
.select("*")
.where(self.table_abc.json.has_any_keys(["dates", "imported"]))
)
self.assertEqual("SELECT * FROM \"abc\" WHERE \"json\"?|ARRAY['dates','imported']", str(q))
def test_subnet_contains_inet(self):
q = (
PostgreSQLQuery.from_(self.table_abc)
.select(self.table_abc.a.lshift(2))
.where(self.table_abc.cidr >> "1.1.1.1")
)
self.assertEqual("SELECT \"a\"<<2 FROM \"abc\" WHERE \"cidr\">>'1.1.1.1'", str(q))
class DistinctOnTests(unittest.TestCase):
table_abc = Table("abc")
def test_distinct_on(self):
q = PostgreSQLQuery.from_(self.table_abc).distinct_on("lname", self.table_abc.fname).select("lname", "id")
self.assertEqual('''SELECT DISTINCT ON("lname","fname") "lname","id" FROM "abc"''', str(q))
class ArrayTests(unittest.TestCase):
def test_array_syntax(self):
tb = Table("tb")
q = PostgreSQLQuery.from_(tb).select(Array(1, "a", ["b", 2, 3]))
self.assertEqual(str(q), "SELECT ARRAY[1,'a',ARRAY['b',2,3]] FROM \"tb\"")
def test_render_alias_in_array_sql(self):
tb = Table("tb")
q = PostgreSQLQuery.from_(tb).select(Array(tb.col).as_("different_name"))
self.assertEqual(str(q), 'SELECT ARRAY["col"] "different_name" FROM "tb"')
class ReturningClauseTests(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.table_abc = Table('abc')
def test_returning_from_missing_table_raises_queryexception(self):
field_from_diff_table = Field('xyz', table=Table('other'))
with self.assertRaisesRegex(QueryException, "You can't return from other tables"):
(
PostgreSQLQuery.from_(self.table_abc)
            .where(self.table_abc.foo == self.table_abc.bar)
.delete()
.returning(field_from_diff_table)
)
def test_queryexception_if_returning_used_on_invalid_query(self):
with
|
chaicko/AlgorithmicToolbox
|
test/data_structures/test_binary_search_trees.py
|
Python
|
gpl-3.0
| 7,486
| 0.001336
|
# import data_structures.binary_search_trees.rope as rope
import data_structures.binary_search_trees.set_range_sum as set_range_sum
import data_structures.binary_search_trees.tree_orders as tree_orders
import pytest
import os
import sys
import resource
CI = os.environ.get('CI') == 'true'
# Helpers
class BinarySearchTree:
def __init__(self):
self.root = None
self.size = 0
def length(self):
return self.size
def __len__(self):
return self.size
def __iter__(self):
return self.root.__iter__()
    def get(self, key):
if self.root:
res = self.find(key, self.root)
if res and res.key == key:
return res.payload
else:
return None
else:
return None
def find(self, key, node):
if node.key == key:
return node
if key < node.key:
if not node.has_left_child():
return node
return self.find(key, node.left_child)
else:
if not node.has_right_child():
return node
return self.find(key, node.right_child)
def __getitem__(self, key):
return self.get(key)
def __contains__(self, key):
if self.get(key):
return True
else:
return False
    def put(self, key, val):
        if self.root:
            print("put on non empty")
            self._put(key, val, self.root)
        else:
            print("put on empty")
            self.root = TreeNode(key, val)
        self.size += 1
def _put(self, key, val, node):
_parent = self.find(key, node)
if _parent.key == key: # already exists, replace values
_parent.replace_node_data(key, val, _parent.left_child,
_parent.right_child)
return
# At this point is guaranteed that _parent has null child
if key < _parent.key:
assert not _parent.has_left_child()
_parent.left_child = TreeNode(key, val, parent=_parent)
else:
assert not _parent.has_right_child()
_parent.right_child = TreeNode(key, val, parent=_parent)
def __setitem__(self, k, v):
"""
Allows usage of [].
:param k:
:param v:
:return:
"""
self.put(k, v)
class TreeNode:
def __init__(self, key, val, left=None, right=None, parent=None):
self.key = key
self.payload = val
self.left_child = left
self.right_child = right
self.parent = parent
def has_left_child(self):
return self.left_child
def has_right_child(self):
return self.right_child
def is_left_child(self):
        return self.parent and self.parent.left_child == self
def is_right_child(self):
        return self.parent and self.parent.right_child == self
def is_root(self):
return not self.parent
def is_leaf(self):
return not (self.right_child or self.left_child)
def has_any_children(self):
return self.right_child or self.left_child
def has_both_children(self):
return self.right_child and self.left_child
def replace_node_data(self, key, value, lc, rc):
self.key = key
self.payload = value
self.left_child = lc
self.right_child = rc
if self.has_left_child():
self.left_child.parent = self
if self.has_right_child():
self.right_child.parent = self
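# Editor's hedged usage sketch for the helpers above:
#   bst = BinarySearchTree()
#   bst[5] = 'five'; bst[3] = 'three'   # __setitem__ delegates to put()
#   assert bst[3] == 'three' and 7 not in bst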
@pytest.mark.timeout(6)
class TestTreeOrders:
@classmethod
def setup_class(cls):
""" setup any state specific to the execution of the given class (which
usually contains tests).
"""
sys.setrecursionlimit(10 ** 6) # max depth of recursion
resource.setrlimit(resource.RLIMIT_STACK, (2 ** 27, 2 ** 27))
@pytest.mark.parametrize("n,key,left,right,exp_inorder,exp_preorder,exp_postorder", [
(5,
[4, 2, 5, 1, 3],
[1, 3, -1, -1, -1],
[2, 4, -1, -1, -1],
[1, 2, 3, 4, 5], [4, 2, 1, 3, 5], [1, 3, 2, 5, 4]),
(10,
[0, 10, 20, 30, 40, 50, 60, 70, 80, 90],
[7, -1, -1, 8, 3, -1, 1, 5, -1, -1],
[2, -1, 6, 9, -1, -1, -1, 4, -1, -1],
[50, 70, 80, 30, 90, 40, 0, 20, 10, 60],
[0, 70, 50, 40, 30, 80, 90, 20, 60, 10],
[50, 80, 90, 30, 40, 70, 10, 60, 20, 0])
])
def test_samples(self, n,key,left,right,exp_inorder,exp_preorder,exp_postorder):
tree = tree_orders.TreeOrders(n, key, left, right)
assert exp_inorder == tree.order(tree.in_order)
assert exp_preorder == tree.order(tree.pre_order)
assert exp_postorder == tree.order(tree.post_order)
@pytest.mark.timeout(120)
class TestSetRangeSum:
@classmethod
def setup_class(cls):
""" setup any state specific to the execution of the given class (which
usually contains tests).
"""
del set_range_sum.root
set_range_sum.root = None
@pytest.mark.parametrize(
"test_input,expected", [(
(
"? 1",
"+ 1",
"? 1",
"+ 2",
"s 1 2",
"+ 1000000000",
"? 1000000000",
"- 1000000000",
"? 1000000000",
"s 999999999 1000000000",
"- 2",
"? 2",
"- 0",
"+ 9",
"s 0 9"
),
[
"Not found",
"Found",
"3",
"Found",
"Not found",
"1",
"Not found",
"10",
]), (
(
"? 0",
"+ 0",
"? 0",
"- 0",
"? 0",
),
[
"Not found",
"Found",
"Not found"
]), (
(
"+ 491572259",
"? 491572259",
"? 899375874",
"s 310971296 877523306",
"+ 352411209",
),
[
"Found",
"Not found",
"491572259"
]),
# (
# (
# "s 88127140 859949755",
# "s 407584225 906606553",
# "+ 885530090",
# "+ 234423189",
# "s 30746291 664192454",
# "+ 465752492",
# "s 848498590 481606032",
# "+ 844636782",
# "+ 251529178",
# "+ 182631153",
# ),
# [
# "0",
# "0",
# "234423189"
# ])
])
def test_samples(self, test_input, expected):
result = []
processor = set_range_sum.RangeSumProcessor()
for cmd in test_input:
res = processor.process(cmd)
if res:
result.append(res)
assert result == expected
# def test_input_files(self):
# result = []
# processor = set_range_sum.RangeSumProcessor()
# for cmd in test_input:
# res = processor.process(cmd)
# if res:
# result.append(res)
# assert result == expected
|
newvem/pytz
|
pytz/zoneinfo/America/Inuvik.py
|
Python
|
mit
| 5,554
| 0.206338
|
'''tzinfo timezone information for America/Inuvik.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Inuvik(DstTzInfo):
'''America/Inuvik timezone definition. See datetime.tzinfo for details'''
zone = 'America/Inuvik'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,4,14,10,0,0),
d(1918,10,27,9,0,0),
d(1919,5,25,10,0,0),
d(1919,11,1,7,0,0),
d(1942,2,9,10,0,0),
d(1945,8,14,23,0,0),
d(1945,9,30,9,0,0),
d(1965,4,25,8,0,0),
d(1965,10,31,8,0,0),
d(1979,4,29,10,0,0),
d(1980,4,27,9,0,0),
d(1980,10,26,8,0,0),
d(1981,4,26,9,0,0),
d(1981,10,25,8,0,0),
d(1982,4,25,9,0,0),
d(1982,10,31,8,0,0),
d(1983,4,24,9,0,0),
d(1983,10,30,8,0,0),
d(1984,4,29,9,0,0),
d(1984,10,28,8,0,0),
d(1985,4,28,9,0,0),
d(1985,10,27,8,0,0),
d(1986,4,27,9,0,0),
d(1986,10,26,8,0,0),
d(1987,4,5,9,0,0),
d(1987,10,25,8,0,0),
d(1988,4,3,9,0,0),
d(1988,10,30,8,0,0),
d(1989,4,2,9,0,0),
d(1989,10,29,8,0,0),
d(1990,4,1,9,0,0),
d(1990,10,28,8,0,0),
d(1991,4,7,9,0,0),
d(1991,10,27,8,0,0),
d(1992,4,5,9,0,0),
d(1992,10,25,8,0,0),
d(1993,4,4,9,0,0),
d(1993,10,31,8,0,0),
d(1994,4,3,9,0,0),
d(1994,10,30,8,0,0),
d(1995,4,2,9,0,0),
d(1995,10,29,8,0,0),
d(1996,4,7,9,0,0),
d(1996,10,27,8,0,0),
d(1997,4,6,9,0,0),
d(1997,10,26,8,0,0),
d(1998,4,5,9,0,0),
d(1998,10,25,8,0,0),
d(1999,4,4,9,0,0),
d(1999,10,31,8,0,0),
d(2000,4,2,9,0,0),
d(2000,10,29,8,0,0),
d(2001,4,1,9,0,0),
d(2001,10,28,8,0,0),
d(2002,4,7,9,0,0),
d(2002,10,27,8,0,0),
d(2003,4,6,9,0,0),
d(2003,10,26,8,0,0),
d(2004,4,4,9,0,0),
d(2004,10,31,8,0,0),
d(2005,4,3,9,0,0),
d(2005,10,30,8,0,0),
d(2006,4,2,9,0,0),
d(2006,10,29,8,0,0),
d(2007,3,11,9,0,0),
d(2007,11,4,8,0,0),
d(2008,3,9,9,0,0),
d(2008,11,2,8,0,0),
d(2009,3,8,9,0,0),
d(2009,11,1,8,0,0),
d(2010,3,14,9,0,0),
d(2010,11,7,8,0,0),
d(2011,3,13,9,0,0),
d(2011,11,6,8,0,0),
d(2012,3,11,9,0,0),
d(2012,11,4,8,0,0),
d(2013,3,10,9,0,0),
d(2013,11,3,8,0,0),
d(2014,3,9,9,0,0),
d(2014,11,2,8,0,0),
d(2015,3,8,9,0,0),
d(2015,11,1,8,0,0),
d(2016,3,13,9,0,0),
d(2016,11,6,8,0,0),
d(2017,3,12,9,0,0),
d(2017,11,5,8,0,0),
d(2018,3,11,9,0,0),
d(2018,11,4,8,0,0),
d(2019,3,10,9,0,0),
d(2019,11,3,8,0,0),
d(2020,3,8,9,0,0),
d(2020,11,1,8,0,0),
d(2021,3,14,9,0,0),
d(2021,11,7,8,0,0),
d(2022,3,13,9,0,0),
d(2022,11,6,8,0,0),
d(2023,3,12,9,0,0),
d(2023,11,5,8,0,0),
d(2024,3,10,9,0,0),
d(2024,11,3,8,0,0),
d(2025,3,9,9,0,0),
d(2025,11,2,8,0,0),
d(2026,3,8,9,0,0),
d(2026,11,1,8,0,0),
d(2027,3,14,9,0,0),
d(2027,11,7,8,0,0),
d(2028,3,12,9,0,0),
d(2028,11,5,8,0,0),
d(2029,3,11,9,0,0),
d(2029,11,4,8,0,0),
d(2030,3,10,9,0,0),
d(2030,11,3,8,0,0),
d(2031,3,9,9,0,0),
d(2031,11,2,8,0,0),
d(2032,3,14,9,0,0),
d(2032,11,7,8,0,0),
d(2033,3,13,9,0,0),
d(2033,11,6,8,0,0),
d(2034,3,12,9,0,0),
d(2034,11,5,8,0,0),
d(2035,3,11,9,0,0),
d(2035,11,4,8,0,0),
d(2036,3,9,9,0,0),
d(2036,11,2,8,0,0),
d(2037,3,8,9,0,0),
d(2037,11,1,8,0,0),
]
_transition_info = [
i(-28800,0,'PST'),
i(-25200,3600,'PDT'),
i(-28800,0,'PST'),
i(-25200,3600,'PDT'),
i(-28800,0,'PST'),
i(-25200,3600,'PWT'),
i(-25200,3600,'PPT'),
i(-28800,0,'PST'),
i(-21600,7200,'PDDT'),
i(-28800,0,'PST'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
]
Inuvik = Inuvik()
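# Editor's hedged note: consumers normally reach this class via
# pytz.timezone('America/Inuvik'); localizing a summer-1979 datetime picks the
# MST/MDT entries above, reflecting Inuvik's 1979 move from Pacific to
# Mountain time.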
|
babyliynfg/cross
|
tools/project-creator/Python2.6.6/Lib/test/test_scope.py
|
Python
|
mit
| 16,217
| 0.003761
|
import unittest
from test.test_support import (check_syntax_error, _check_py3k_warnings,
check_warnings, run_unittest)
class ScopeTests(unittest.TestCase):
def testSimpleNesting(self):
def make_adder(x):
def adder(y):
return x + y
return adder
inc = make_adder(1)
plus10 = make_adder(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testExtraNesting(self):
def make_adder2(x):
def extra(): # check freevars passing through non-use scopes
def adder(y):
return x + y
return adder
return extra()
inc = make_adder2(1)
plus10 = make_adder2(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testSimpleAndRebinding(self):
def make_adder3(x):
def adder(y):
return x + y
x = x + 1 # check tracking of assignment to x in defining scope
return adder
inc = make_adder3(0)
plus10 = make_adder3(9)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testNestingGlobalNoFree(self):
        def make_adder4(): # XXX add extra level of indirection
def nest():
def nest():
def adder(y):
return global_x + y # check that plain old globals work
return adder
return nest()
return nest()
global_x = 1
adder = make_adder4()
self.assertEqual(adder(1), 2)
global_x = 10
self.assertEqual(adder(-2), 8)
def testNestingThroughClass(self):
def make_adder5(x):
class Adder:
def __call__(self, y):
return x + y
return Adder()
inc = make_adder5(1)
plus10 = make_adder5(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testNestingPlusFreeRefToGlobal(self):
def make_adder6(x):
global global_nest_x
def adder(y):
return global_nest_x + y
global_nest_x = x
return adder
inc = make_adder6(1)
plus10 = make_adder6(10)
self.assertEqual(inc(1), 11) # there's only one global
self.assertEqual(plus10(-2), 8)
def testNearestEnclosingScope(self):
def f(x):
def g(y):
x = 42 # check that this masks binding in f()
def h(z):
return x + z
return h
return g(2)
test_func = f(10)
self.assertEqual(test_func(5), 47)
def testMixedFreevarsAndCellvars(self):
def identity(x):
return x
def f(x, y, z):
def g(a, b, c):
a = a + x # 3
def h():
# z * (4 + 9)
# 3 * 13
return identity(z * (b + y))
y = c + z # 9
return h
return g
g = f(1, 2, 3)
h = g(2, 4, 6)
self.assertEqual(h(), 39)
def testFreeVarInMethod(self):
def test():
method_and_var = "var"
class Test:
def method_and_var(self):
return "method"
def test(self):
return method_and_var
def actual_global(self):
return str("global")
def str(self):
return str(self)
return Test()
t = test()
self.assertEqual(t.test(), "var")
self.assertEqual(t.method_and_var(), "method")
self.assertEqual(t.actual_global(), "global")
method_and_var = "var"
class Test:
# this class is not nested, so the rules are different
def method_and_var(self):
return "method"
def test(self):
return method_and_var
def actual_global(self):
return str("global")
def str(self):
return str(self)
t = Test()
self.assertEqual(t.test(), "var")
self.assertEqual(t.method_and_var(), "method")
self.assertEqual(t.actual_global(), "global")
def testRecursion(self):
def f(x):
def fact(n):
if n == 0:
return 1
else:
                    return n * fact(n - 1)
if x >= 0:
return fact(x)
else:
                raise ValueError, "x must be >= 0"
self.assertEqual(f(6), 720)
def testUnoptimizedNamespaces(self):
check_syntax_error(self, """\
def unoptimized_clash1(strip):
def f(s):
from string import *
return strip(s) # ambiguity: free or local
return f
""")
check_syntax_error(self, """\
def unoptimized_clash2():
from string import *
def f(s):
return strip(s) # ambiguity: global or local
return f
""")
check_syntax_error(self, """\
def unoptimized_clash2():
from string import *
def g():
def f(s):
return strip(s) # ambiguity: global or local
return f
""")
# XXX could allow this for exec with const argument, but what's the point
check_syntax_error(self, """\
def error(y):
exec "a = 1"
def f(x):
return x + y
return f
""")
check_syntax_error(self, """\
def f(x):
def g():
return x
del x # can't del name
""")
check_syntax_error(self, """\
def f():
def g():
from string import *
return strip # global or local?
""")
# and verify a few cases that should work
exec """
def noproblem1():
from string import *
f = lambda x:x
def noproblem2():
from string import *
def f(x):
return x + 1
def noproblem3():
from string import *
def f(x):
global y
y = x
"""
def testLambdas(self):
f1 = lambda x: lambda y: x + y
inc = f1(1)
plus10 = f1(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(5), 15)
f2 = lambda x: (lambda : lambda y: x + y)()
inc = f2(1)
plus10 = f2(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(5), 15)
f3 = lambda x: lambda y: global_x + y
global_x = 1
inc = f3(None)
self.assertEqual(inc(2), 3)
f8 = lambda x, y, z: lambda a, b, c: lambda : z * (b + y)
g = f8(1, 2, 3)
h = g(2, 4, 6)
self.assertEqual(h(), 18)
def testUnboundLocal(self):
def errorInOuter():
print y
def inner():
return y
y = 1
def errorInInner():
def inner():
return y
inner()
y = 1
try:
errorInOuter()
except UnboundLocalError:
pass
else:
self.fail()
try:
errorInInner()
except NameError:
pass
else:
self.fail()
# test for bug #1501934: incorrect LOAD/STORE_GLOBAL generation
exec """
global_x = 1
def f():
global_x += 1
try:
f()
except UnboundLocalError:
pass
else:
fail('scope of global_x not correctly determined')
""" in {'fail': self.fail}
def testComplexDefinitions(self):
def makeReturner(*lst):
def returner():
return lst
return returner
self.assertEqual(makeReturner(1,2,3)(), (1,2,3))
def makeReturner2(**kwargs):
def returner():
|
iambocai/ansible
|
v2/test/errors/test_errors.py
|
Python
|
gpl-3.0
| 3,194
| 0.003444
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansible.errors import AnsibleError
from ansible.compat.tests import BUILTINS
from ansible.compat.tests.mock import mock_open, patch
class TestErrors(unittest.TestCase):
def setUp(self):
self.message = 'This is the error message'
self.obj = AnsibleBaseYAMLObject()
def tearDown(self):
pass
def test_basic_error(self):
e = AnsibleError(self.message)
self.assertEqual(e.message, self.message)
self.assertEqual(e.__repr__(), self.message)
@patch.object(AnsibleError, '_get_error_lines_from_file')
def test_error_with_object(self, mock_method):
self.obj._data_source = 'foo.yml'
self.obj._line_number = 1
self.obj._column_number = 1
mock_method.return_value = ('this is line 1\n', '')
e = AnsibleError(self.message, self.obj)
self.assertEqual(e.message, "This is the error message\nThe error appears to have been in 'foo.yml': line 1, column 1,\nbut may actually be before there depending on the exact syntax problem.\n\nthis is line 1\n^\n")
def test_get_error_lines_from_file(self):
m = mock_open()
m.return_value.readlines.return_value = ['this is line 1\n']
with patch('{0}.open'.format(BUILTINS), m):
# this line will be found in the file
self.obj._data_source = 'foo.yml'
self.obj._line_number = 1
self.obj._column_number = 1
e = AnsibleError(self.message, self.obj)
self.assertEqual(e.message, "This is the error message\nThe error appears to have been in 'foo.yml': line 1, column 1,\nbut may actually be before there depending on the exact syntax problem.\n\nthis is line 1\n^\n")
# this line will not be found, as it is out of the index range
self.obj._data_source = 'foo.yml'
self.obj._line_number = 2
self.obj._column_number = 1
e = AnsibleError(self.message, self.obj)
self.assertEqual(e.message, "This is the error message\nThe error appears to have been in 'foo.yml': line 2, column 1,\nbut may actually be before there depending on the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)")
|
Ecotrust/forestplanner
|
lot/trees/tests/__init__.py
|
Python
|
bsd-3-clause
| 25
| 0
|
from .unittests import *
| |
prakashpp/trytond-google-merchant
|
channel.py
|
Python
|
bsd-3-clause
| 797
| 0
|
# -*- coding: utf-8 -*-
"""
channel.py
:copyright: (c) 2015 by Fulfil.IO Inc.
:license: see LICENSE for more details.
"""
from trytond.pool import PoolMeta
from trytond.model import fields
__all__ = ['Channel']
__metaclass__ = PoolMeta
def submit_to_google(url, data):
import requests
import json
return requests.post(
url,
data=json.dumps(data),
headers={
'Content-Type': 'application/json',
            'Authorization': 'Bearer ya29.5AE7v1wOfgun1gR_iXwuGhMnt8nPNbT4C-Pd39DUnsNGb9I6U5FQqRJXNyPb3a0Dk1OWzA', # noqa
}
)
class Channel:
__name__ = "sale.channel"
    website = fields.Many2One('nereid.website', 'Website', select=True)
@classmethod
def upload_products_to_google_merchant(cls):
pass
|
elena/django
|
tests/check_framework/test_multi_db.py
|
Python
|
bsd-3-clause
| 1,726
| 0.000579
|
from unittest import mock
from django.db import connections, models
from django.test import SimpleTestCase
from django.test.utils import isolate_apps, override_settings
class TestRouter:
"""
Routes to the 'other' database if the model name starts with 'Other'.
"""
    def allow_migrate(self, db, app_label, model_name=None, **hints):
return db == ('other' if model_name.startswith('other') else 'default')
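# Editor's note (hedged): steering models named 'Other*' to the 'other' alias
# means each check below should touch exactly one connection's validation
# backend, which the mock patches assert.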
@override_settings(DATABASE_ROUTERS=[TestRouter()])
@isolate_apps('check_framework')
class TestMultiDBChecks(SimpleTestCase):
def _patch_check_field_on(self, db):
        return mock.patch.object(connections[db].validation, 'check_field')
def test_checks_called_on_the_default_database(self):
class Model(models.Model):
field = models.CharField(max_length=100)
model = Model()
with self._patch_check_field_on('default') as mock_check_field_default:
with self._patch_check_field_on('other') as mock_check_field_other:
model.check(databases={'default', 'other'})
self.assertTrue(mock_check_field_default.called)
self.assertFalse(mock_check_field_other.called)
def test_checks_called_on_the_other_database(self):
class OtherModel(models.Model):
field = models.CharField(max_length=100)
model = OtherModel()
with self._patch_check_field_on('other') as mock_check_field_other:
with self._patch_check_field_on('default') as mock_check_field_default:
model.check(databases={'default', 'other'})
self.assertTrue(mock_check_field_other.called)
self.assertFalse(mock_check_field_default.called)
|
popazerty/try
|
lib/python/Plugins/Extensions/IniLastFM/LastFMConfig.py
|
Python
|
gpl-2.0
| 3,064
| 0.014687
|
from Screens.Screen import Screen
from Components.config import config, getConfigListEntry, ConfigSubsection, ConfigText
from Components.ConfigList import ConfigListScreen
from Components.Label import Label
from Components.ActionMap import ActionMap
# for localized messages
from . import _
class LastFMConfigScreen(ConfigListScreen,Screen):
config.plugins.LastFM = ConfigSubsection()
config.plugins.LastFM.name = ConfigText(default = _("Last.FM"))
def __init__(self, session, args = 0):
self.session = session
Screen.__init__(self, session)
self.skinName = ["Setup"]
self.list = [
getConfigListEntry(_("Show in (needs GUI restart)"), config.plugins.LastFM.menu),
getConfigListEntry(_("Name (needs GUI restart)"), config.plugins.LastFM.name),
getConfigListEntry(_("Description"
|
), config.plugins.LastFM.description),
getConfigListEntry(_("Last.FM Username"), config.plugins.LastFM.us
|
ername),
getConfigListEntry(_("Password"), config.plugins.LastFM.password),
getConfigListEntry(_("Send now playing Audio Tracks"), config.plugins.LastFM.sendSubmissions),
getConfigListEntry(_("Use LastFM Proxy"), config.plugins.LastFM.useproxy),
getConfigListEntry(_("LastFM Proxy port"), config.plugins.LastFM.proxyport),
getConfigListEntry(_("Recommendation level"), config.plugins.LastFM.recommendedlevel),
getConfigListEntry(_("Show Coverart"), config.plugins.LastFM.showcoverart),
getConfigListEntry(_("Timeout Statustext (seconds)"), config.plugins.LastFM.timeoutstatustext),
getConfigListEntry(_("Timeout to select a Tab (seconds)"), config.plugins.LastFM.timeouttabselect),
getConfigListEntry(_("Interval to refresh Metadata (seconds)"), config.plugins.LastFM.metadatarefreshinterval),
getConfigListEntry(_("Use Screensaver"), config.plugins.LastFM.sreensaver.use),
getConfigListEntry(_("Wait before Screensaver (seconds)"), config.plugins.LastFM.sreensaver.wait),
getConfigListEntry(_("Show Coverart in Screensaver"), config.plugins.LastFM.sreensaver.showcoverart),
getConfigListEntry(_("Show Coverart Animation in Screensaver"), config.plugins.LastFM.sreensaver.coverartanimation),
getConfigListEntry(_("Speed for Coverart Animation"), config.plugins.LastFM.sreensaver.coverartspeed),
getConfigListEntry(_("Interval for Coverart Animation"), config.plugins.LastFM.sreensaver.coverartinterval),
]
ConfigListScreen.__init__(self, self.list)
self["key_red"] = Label(_("Cancel"))
self["key_green"] = Label(_("OK"))
self["setupActions"] = ActionMap(["SetupActions"],
{
"green": self.save,
"red": self.cancel,
"save": self.save,
"cancel": self.cancel,
"ok": self.save,
}, -2)
def save(self):
print "saving"
for x in self["config"].list:
x[1].save()
self.close(True)
def cancel(self):
print "cancel"
for x in self["config"].list:
x[1].cancel()
self.close(False)
|
bjolivot/ansible
|
lib/ansible/modules/notification/telegram.py
|
Python
|
gpl-3.0
| 2,822
| 0.007087
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Artem Feofanov <artem.feofanov@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: telegram
version_added: "2.2"
author: "Artem Feofanov (@tyouxa)"
short_description: module for sending notifications via telegram
description:
- Send notifications via telegram bot, to a verified group or user
notes:
- You will require a telegram account and create a telegram bot to use this module.
options:
msg:
description:
- What message you wish to send.
required: true
token:
description:
- Token identifying your telegram bot.
required: true
chat_id:
description:
- Telegram group or user chat_id
required: true
"""
EXAMPLES = """
- name: send a message to chat in playbook
telegram:
token: 'bot9999999:XXXXXXXXXXXXXXXXXXXXXXX'
chat_id: 000000
msg: Ansible task finished
"""
RETURN = """
msg:
description: The message you attempted to send
returned: success
type: string
sample: "Ansible task finished"
"""
import urllib
def main():
module = AnsibleModule(
argument_spec = dict(
token = dict(type='str',required=True,no_log=True),
chat_id = dict(type='str',required=True,no_log=True),
msg = dict(type='str',required=True)),
supports_check_mode=True
)
token = urllib.quote(module.params.get('token'))
chat_id = urllib.quote(module.params.get('chat_id'))
msg = urllib.quote(module.params.get('msg'))
url = 'https://api.telegram.org/' + token + '/sendMessage?text=' + msg + '&chat_id=' + chat_id
if module.check_mode:
module.exit_json(changed=False)
response, info = fetch_url(module, url)
if info['status'] == 200:
module.exit_json(changed=True)
else:
module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
|
azilya/Zaliznyak-s-grammatical-dictionary
|
gdictionary/getfinal.py
|
Python
|
lgpl-3.0
| 1,310
| 0.012977
|
import csv, os
def adding(lemma_noun, file_noun, tags_noun, lemma_verb, file_verb, tags_verb):
"""
Creates a file with final results: lemmas for unknown words and Zaliznyak's tags for them.
"""
outfile = open('predicted.csv', 'w', encoding = 'utf-8')
writer = csv.writer(outfile, lineterminator='\n', delimiter=',')
writer.writerow(['Word', 'Class', 'Gender']) #csv format
infile_noun = open(file_noun, 'r', encoding ='utf-8')
gen =[]
for line in infile_noun:
elems = line.strip()
elems = elems.split(',')
gen.append(elems[3])
gen.remove(gen[0])
for i, elem in enumerate(lemma_noun):
one = elem
three = gen[i]
two = tags_noun[i]
writer.writerow([one, two, three])
infile_noun.close()
os.remove(file_noun)
##########################
##########################
infile_verb = open(file_verb, 'r', encoding ='utf-8')
for i, elem in enumerate(lemma_verb):
one = elem
two = tags_verb[i]
three = "-"
writer.writerow([one, two, three])
infile_verb.close()
os.remove(file_verb)
outfile.close()
return open('predicted.csv', encoding = 'utf-8').readlines()
|
opendaylight/netvirt
|
resources/tools/odltools/odltools/cli_utils.py
|
Python
|
epl-1.0
| 1,857
| 0.000539
|
# Copyright 2018 Red Hat, Inc. and others. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
logger = logging.getLogger("cli_utils")
def type_input_file(path):
if path == '-':
return path
if not os.path.isfile(path):
logger.error('File "%s" not found' % path)
raise argparse.ArgumentError
return path
def add_common_args(parser):
parser.add_argument("--path",
help="the directory that the parsed data is written into")
parser.add_argument("--transport", default="http",
choices=["http", "https"],
help="transport for connections")
parser.add_argument("-i", "--ip", default="localhost",
help="OpenDaylight ip address")
parser.add_argument("-t", "--port", default="8181",
help="OpenDaylight restconf port, default: 8181")
parser.add_argument("-u", "--user", default="admin",
help="OpenDaylight restconf username, default: admin")
parser.add_argument("-w", "--pw", default="admin",
help="OpenDaylight restconf password, default: admin")
parser.add_argument("-p", "--pretty_print", action="store_true",
help="json dump with pretty_print")
|
cornell-brg/pydgin
|
arm/bootstrap.py
|
Python
|
bsd-3-clause
| 8,853
| 0.029369
|
#=======================================================================
# bootstrap.py
#=======================================================================
from machine import State
from pydgin.utils import r_uint
#from pydgin.storage import Memory
EMULATE_GEM5 = False
EMULATE_SIMIT = True
# Currently these constants are set to match gem5
memory_size = 2**27
page_size = 8192
if EMULATE_SIMIT:
memory_size = 0xc0000000 + 1
MAX_ENVIRON = 1024 * 16
# MIPS stack starts at top of kuseg (0x7FFF.FFFF) and grows down
#stack_base = 0x7FFFFFFF
stack_base = memory_size-1 # TODO: set this correctly!
#-----------------------------------------------------------------------
# syscall_init
#-----------------------------------------------------------------------
#
# MIPS Memory Map (32-bit):
#
# 0xC000.0000 - Mapped (kseg2) - 1GB
# 0xA000.0000 - Unmapped uncached (kseg1) - 512MB
# 0x8000.0000 - Unmapped cached (kseg0) - 512MB
# 0x0000.0000 - 32-bit user space (kuseg) - 2GB
#
def syscall_init( mem, entrypoint, breakpoint, argv, envp, debug ):
#---------------------------------------------------------------------
# memory map initialization
#---------------------------------------------------------------------
# TODO: for multicore allocate 8MB for each process
#proc_stack_base[pid] = stack_base - pid * 8 * 1024 * 1024
# top of heap (breakpoint) # TODO: handled in load program
# memory maps: 1GB above top of heap
# mmap_start = mmap_end = break_point + 0x40000000
#---------------------------------------------------------------------
# stack argument initialization
#---------------------------------------------------------------------
# http://articles.manugarg.com/aboutelfauxiliaryvectors.html
#
# contents size
#
# 0x7FFF.FFFF [ end marker ] 4 (NULL)
# [ environment str data ] >=0
# [ arguments str data ] >=0
# [ padding ] 0-16
# [ auxv[n] data ] 8 (AT_NULL Vector)
# 8*x
# [ auxv[0] data ] 8
# [ envp[n] pointer ] 4 (NULL)
# 4*x
# [ envp[0] pointer ] 4
# [ argv[n] pointer ] 4 (NULL)
# 4*x
# [ argv[0] pointer ] 4 (program name)
# stack ptr-> [ argc ] 4 (size of argv)
#
# (stack grows down!!!)
#
# 0x7F7F.FFFF < stack limit for pid 0 >
#
# auxv variables initialized by gem5, are these needed?
#
# - PAGESZ: system page size
# - PHDR: virtual addr of program header tables
# (for statically linked binaries)
# - PHENT: size of program header entries in elf file
# - PHNUM: number of program headers in elf file
# - AT_ENRTY: program entry point
# - UID: user ID
# - EUID: effective user ID
# - GID: group ID
# - EGID: effective group ID
# TODO: handle auxv, envp variables
auxv = []
if EMULATE_GEM5:
argv = argv[1:]
argc = len( argv )
def sum_( x ):
val = 0
for i in x:
val += i
return val
# calculate sizes of sections
# TODO: parameterize auxv/envp/argv calc for variable int size?
stack_nbytes = [ 4, # end mark nbytes (sentry)
sum_([len(x)+1 for x in envp]), # envp_str nbytes
sum_([len(x)+1 for x in argv]), # argv_str nbytes
0, # padding nbytes
8*(len(auxv) + 1), # auxv nbytes
4*(len(envp) + 1), # envp nbytes
4*(len(argv) + 1), # argv nbytes
4 ] # argc nbytes
if EMULATE_SIMIT:
stack_nbytes[4] = 0 # don't do auxv for simit
def round_up( val ):
alignment = 16
return (val + alignment - 1) & ~(alignment - 1)
# calculate padding to align boundary
# NOTE: MIPs approach (but ignored by gem5)
#stack_nbytes[3] = 16 - (sum_(stack_nbytes[:3]) % 16)
# NOTE: gem5 ARM approach
stack_nbytes[3] = round_up( sum_(stack_nbytes) ) - sum_(stack_nbytes)
if EMULATE_SIMIT:
stack_nbytes[3] = 0
def round_down( val ):
alignment = 16
return val & ~(alignment - 1)
# calculate stack pointer based on size of storage needed for args
# TODO: round to nearest page size?
stack_ptr = round_down( stack_base - sum_( stack_nbytes ) )
if EMULATE_SIMIT:
stack_ptr = stack_base - MAX_ENVIRON
offset = stack_ptr + sum_( stack_nbytes )
# FIXME: this offset seems really wrong, but this is how gem5 does it!
if EMULATE_GEM5:
offset = stack_base
print "XXX", offset
stack_off = []
for nbytes in stack_nbytes:
offset -= nbytes
stack_off.append( offset )
# FIXME: this fails for GEM5's hacky offset...
if not EMULATE_GEM5:
assert offset == stack_ptr
if debug.enabled( 'bootstrap' ):
print 'stack base', hex( stack_base )
print 'stack min ', hex( stack_ptr )
print 'stack size', stack_base - stack_ptr
print
print 'sentry ', stack_nbytes[0]
print 'env d ', stack_nbytes[1]
print 'arg d ', stack_nbytes[2]
print 'padding', stack_nbytes[3]
print 'auxv ', stack_nbytes[4]
print 'envp ', stack_nbytes[5]
print 'argv ', stack_nbytes[6]
print 'argc ', stack_nbytes[7]
# utility functions
def str_to_mem( mem, val, addr ):
for i, char in enumerate(val+'\0'):
mem.write( addr + i, 1, ord( char ) )
return addr + len(val) + 1
def int_to_mem( mem, val, addr ):
# TODO properly handle endianess
for i in range( 4 ):
mem.write( addr+i, 1, (val >> 8*i) & 0xFF )
return addr + 4
# write end marker to memory
int_to_mem( mem, 0, stack_off[0] )
# write environment strings to memory
envp_ptrs = []
offset = stack_off[1]
for x in envp:
envp_ptrs.append( offset )
offset = str_to_mem( mem, x, offset )
assert offset == stack_off[0]
# write argument strings to memory
argv_ptrs = []
offset = stack_off[2]
for x in argv:
argv_ptrs.append( offset )
offset = str_to_mem( mem, x, offset )
assert offset == stack_off[1]
# write auxv vectors to memory
offset = stack_off[4]
if not EMULATE_SIMIT:
for type_, value in auxv + [(0,0)]:
offset = int_to_mem( mem, type_, offset )
offset = int_to_mem( mem, value, offset )
assert offset == stack_off[3]
# write envp pointers to memory
offset = stack_off[5]
for env in envp_ptrs + [0]:
offset = int_to_mem( mem, env, offset )
assert offset == stack_off[4]
# write argv pointers to memory
offset = stack_off[6]
for arg in argv_ptrs + [0]:
offset = int_to_mem( mem, arg, offset )
assert offset == stack_off[5]
# write argc to memory
offset = stack_off[7]
offset = int_to_mem( mem, argc, offset )
assert offset == stack_off[6]
# write zeros to bottom of stack
# TODO: why does gem5 do this?
offset = stack_off[7] - 1
while offset >= stack_ptr:
mem.write( offset, 1, ord( '\0' ) )
offset -= 1
# initialize processor state
state = State( mem, debug, reset_addr=0x1000 )
if debug.enabled( 'bootstrap' ):
print '---'
#print 'argc = %d (%x)' % ( argc, stack_off[-1] )
#for i, ptr in enumerate(argv_ptrs):
# print 'argv[%2d] = %x (%x)' % ( i, argv_ptrs[i], stack_off[-2]+4*i ),
# print len( argv[i] ), argv[i]
#print 'argd = %s (%x)' % ( argv[0], stack_off[-6] )
print '---'
print 'envd-base', hex(stack_off[-7])
print 'argd-base', hex(stack_off[-6])
print 'auxv-base', hex(stack_off[-4])
print 'envp-base', hex(stack_off[-3])
print 'argv-base', hex(stack_off[-2])
print 'argc-base', hex(stack_off[-1])
print 'STACK_PTR', hex( stack_ptr )
# TODO: where should this go?
#state.pc = entryp
|
JohnJakeChambers/break-the-vigenere
|
VigenereCracker.py
|
Python
|
gpl-3.0
| 3,920
| 0.009949
|
class VigenereCracker:
def __init__(self, language, minLen, maxLen):
self.LANGUAGE = language
#Key length could be from 1 to 13 bytes
self.KEYLENBOUNDS = range(minLen,maxLen)
self.SUMQPSQUARE = 0.065
self.KEYLENGTHFOUND = -1
self.KEY = []
self.CONTENT = None
def setContent(self, _content):
self.CONTENT = _content
def reset(self):
self.KEYLENGTHFOUND = -1
self.CONTENT = None
self.KEY = []
def __FoundKeyLen(self):
if not self.CONTENT:
return None
_KEYLENDICT_ = {}
for i in self.KEYLENBOUNDS:
retChar = self.takeCharEveryKPos(0, i, self.CONTENT)
mapChar = self.countOccurrenceAndFrequency(retChar)
_KEYLENDICT_[i] = mapChar
_kMAX = -1
_sumQsquareMAX = 0
for k in _KEYLENDICT_:
_val = self.computeSumQiSquare(_KEYLENDICT_[k])
if _val > _sumQsquareMAX:
_sumQsquareMAX = _val
_kMAX = k
self.KEYLENGTHFOUND = _kMAX
return _kMAX
def getKeyLen(self):
return self.KEYLENGTHFOUND
def FoundKey(self):
if not self.CONTENT:
return None
self.__FoundKeyLen()
if self.KEYLENGTHFOUND == -1:
return None
for i in range(self.KEYLENGTHFOUND):
_resultsDecrypt = {}
_firstTryCrypt = self.takeCharEveryKPos(i, self.KEYLENGTHFOUND, self.CONTENT)
for tryK in range(1,256):
_resultsDecrypt[tryK] = []
for el in _firstTryCrypt:
_resultsDecrypt[tryK].append(el ^ tryK)
_candidateDecrypt = {}
for tryK in _resultsDecrypt:
if self.verifyDecrypt(_resultsDecrypt[tryK]):
_candidateDecrypt[tryK] = _resultsDecrypt[tryK]
_maximizeK = 0
_maximizeSum = 0
for candidateK in _candidateDecrypt:
_map = self.countOccurrenceAndFrequency(_candidateDecrypt[candidateK])
_val = self.computeSumQPiSquareLowerCaseLetter(_map)
if abs(_val - self.SUMQPSQUARE) < abs(_maximizeSum - self.SUMQPSQUARE):
_maximizeK = candidateK
_maximizeSum = _val
self.KEY.append(_maximizeK)
return self.KEY
def takeCharEveryKPos(self, start_pos, k_pos, content):
_Index = start_pos
_retChars = []
_retChars.append(content[_Index])
_Index+=k_pos
while _Index < len(content):
_retChars.append(content[_Index])
_Index+=k_pos
return _retChars
def countOccurrenceAndFrequency(self, content):
_map = {}
for value in content:
if not value in _map:
_map[value] = {'Occurence':0,'Frequency':0}
_map[value]['Occurence'] += 1
for value in _map:
_map[value]['Frequency'] = float(_map[value]['Occurence'])/len(content)*1.0
return _map
def computeSumQiSquare(self, _map):
_sum = float(0.0)
for el in _map:
_q = _map[el]['Frequency']
_qsquare = _q * _q
_sum += _qsquare
return _sum
def computeSumQPiSquareLowerCaseLetter(self,_map):
_sum = float(0.0)
for el in _map:
if self.LANGUAGE.containsLetter(el):
_q = _map[el]['Frequency']
_qsquare = _q * self.LANGUAGE.getFrequency(el)
_sum += _qsquare
return _sum
def verifyDecrypt(self, content):
for el in content:
if el < 32 or el > 127:
return False
return True
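# --- A minimal usage sketch (editor's illustration, not part of the original
# file). "EnglishFrequencies" is a hypothetical stand-in for a real language
# model exposing containsLetter() and getFrequency() over byte values; the
# plaintext and single-byte XOR key below are likewise made up.
if __name__ == '__main__':
    class EnglishFrequencies:
        FREQ = {ord('e'): 0.127, ord('t'): 0.091, ord('a'): 0.082}  # truncated frequency table
        def containsLetter(self, byte):
            return byte in self.FREQ
        def getFrequency(self, byte):
            return self.FREQ[byte]
    plaintext = "the quick brown fox jumps over the lazy dog " * 20
    ciphertext = [ord(c) ^ 0x2a for c in plaintext]  # Vigenere with a key of length 1
    cracker = VigenereCracker(EnglishFrequencies(), 1, 13)
    cracker.setContent(ciphertext)
    print(cracker.FoundKey())  # expected to recover (repetitions of) the key 0x2a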
|
openstack/heat
|
heat/tests/test_stack_lock.py
|
Python
|
apache-2.0
| 12,477
| 0
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from heat.common import exception
from heat.common import service_utils
from heat.engine import stack_lock
from heat.objects import stack as stack_object
from heat.objects import stack_lock as stack_lock_object
from heat.tests import common
from heat.tests import utils
class StackLockTest(common.HeatTestCase):
def setUp(self):
super(StackLockTest, self).setUp()
self.context = utils.dummy_context()
self.stack_id = "aae01f2d-52ae-47ac-8a0d-3fde3d220fea"
self.engine_id = service_utils.generate_engine_id()
stack = mock.MagicMock()
stack.id = self.stack_id
stack.name = "test_stack"
stack.action = "CREATE"
self.mock_get_by_id = self.patchobject(
stack_object.Stack, 'get_by_id', return_value=stack)
class TestThreadLockException(Exception):
pass
def test_successful_acquire_new_lock(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
slock.acquire()
mock_create.assert_called_once_with(
self.context, self.stack_id, self.engine_id)
def test_failed_acquire_existing_lock_current_engine(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value=self.engine_id)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.assertRaises(exception.ActionInProgress, slock.acquire)
self.mock_get_by_id.assert_called_once_with(
self.context,
self.stack_id,
show_deleted=True,
eager_load=False)
mock_create.assert_called_once_with(
self.context, self.stack_id, self.engine_id)
def test_successful_acquire_existing_lock_engine_dead(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
mock_steal = self.patchobject(stack_lock_object.StackLock,
'steal',
return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(service_utils, 'engine_alive', return_value=False)
slock.acquire()
mock_create.assert_called_once_with(
self.context, self.stack_id, self.engine_id)
mock_steal.assert_called_once_with(
self.context, self.stack_id, 'fake-engine-id', self.engine_id)
def test_failed_acquire_existing_lock_engine_alive(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(service_utils, 'engine_alive', return_value=True)
self.assertRaises(exception.ActionInProgress, slock.acquire)
self.mock_get_by_id.assert_called_once_with(
self.context,
self.stack_id,
show_deleted=True,
eager_load=False)
mock_create.assert_called_once_with(
self.context, self.stack_id, self.engine_id)
def test_failed_acquire_existing_lock_engine_dead(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
mock_steal = self.patchobject(stack_lock_object.StackLock,
'steal',
return_value='fake-engine-id2')
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(service_utils, 'engine_alive', return_value=False)
self.assertRaises(exception.ActionInProgress, slock.acquire)
self.mock_get_by_id.assert_called_once_with(
self.context,
self.stack_id,
show_deleted=True,
eager_load=False)
mock_create.assert_called_once_with(
self.context, self.stack_id, self.engine_id)
mock_steal.assert_called_once_with(
self.context, self.stack_id, 'fake-engine-id', self.engine_id)
def test_successful_acquire_with_retry(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
mock_steal = self.patchobject(stack_lock_object.StackLock,
'steal',
side_effect=[True, None])
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(service_utils, 'engine_alive', return_value=False)
slock.acquire()
mock_create.assert_has_calls(
[mock.call(self.context, self.stack_id, self.engine_id)] * 2)
mock_steal.assert_has_calls(
[mock.call(self.context, self.stack_id,
'fake-engine-id', self.engine_id)] * 2)
def test_failed_acquire_one_retry_only(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
mock_steal = self.patchobject(stack_lock_object.StackLock,
'steal',
return_value=True)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(service_utils, 'engine_alive', return_value=False)
self.assertRaises(exception.ActionInProgress, slock.acquire)
self.mock_get_by_id.assert_called_with(
self.context,
self.stack_id,
show_deleted=True,
eager_load=False)
mock_create.assert_has_calls(
[mock.call(self.context, self.stack_id, self.engine_id)] * 2)
mock_steal.assert_has_calls(
[mock.call(self.context, self.stack_id,
'fake-engine-id', self.engine_id)] * 2)
def test_context_mgr_exception(self):
stack_lock_object.StackLock.create = mock.Mock(return_value=None)
stack_lock_object.StackLock.release = mock.Mock(return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
def check_lock():
with slock:
self.assertEqual(1,
stack_lock_object.StackLock.create.call_count)
raise self.TestThreadLockException
self.assertRaises(self.TestThreadLockException, check_lock)
self.assertEqual(1, stack_lock_object.StackLock.release.call_count)
def test_context_mgr_noexception(self):
stack_lock_object.StackLock.create = mock.Mock(return_value=None)
stack_lo
|
amiraliakbari/static-inspector
|
inspector/utils/visualization/graph.py
|
Python
|
mit
| 1,351
| 0.004441
|
# -*- coding: utf-8 -*-
import os
from inspector.utils.strings import render_template
def generate_graph_html(graph, filename):
"""
:type graph: networkx.Graph
:param str filename: path to save the generated html file
"""
params = {
'nodes': [],
'links': [],
}
ids = {}
for i, (node, data) in enumerate(graph.nodes(data=True)):
val = unicode(node)
ids[val] = i
params['nodes'].append('{name:"%s",group:%d}' % (node, data.get('group', 1)))
for u, v, data in graph.edges(data=True):
params['links'].append('{source:%d,target:%d,value:%d,group:%d}' % (ids[unicode(u)],
ids[unicode(v)],
data.get('weight', 1),
data.get('group', 1)))
params['nodes'] = ','.join(params['nodes'])
params['links'] = ','.join(params['links'])
# generating output
current_dir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(current_dir, 'templates', 'force_directed_graph.html'), 'r') as f:
html = f.read()
html = render_template(html, params)
with open(filename, 'w') as f:
f.write(html)
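# --- A minimal usage sketch (editor's illustration, not part of the original
# module). It assumes networkx is installed and that the bundled
# templates/force_directed_graph.html file is present next to this module;
# the node names and output path are hypothetical. Note the module itself is
# Python 2 (it calls unicode()).
if __name__ == '__main__':
    import networkx as nx
    g = nx.Graph()
    g.add_edge('alice', 'bob', weight=2)
    g.add_edge('bob', 'carol', weight=1)
    generate_graph_html(g, '/tmp/example_graph.html')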
|
Herpinemmanuel/Oceanography
|
Cas_1/Salinity/A_General_Salinity.py
|
Python
|
mit
| 983
| 0.016277
|
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from xmitgcm import open_mdsdataset
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
plt.ion()
dir1 = '/homedata/bderembl/runmit/test_southatlgyre'
ds1 = open_mdsdataset(dir1,prefix=['S'])
nt = 0
nz = 0
# Cartography S : Salinity
plt.figure(1)
ax = plt.subplot(projection=ccrs.PlateCarree());
ds1['S'][nt,nz,:,:].plot.pcolormesh('XC', 'YC', ax=ax);
plt.title('Case 1 : Salinity')
ax.coastlines()
gl = ax.gridlines(draw_labels=True, alpha = 0.5, linestyle='--');
gl.xlabels_top = False
gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
plt.savefig('S_General_Salinity_cas1'+'.png')
plt.clf()
# Averages
Average_S = ds1.S.mean().values
print('Average of Salinity')
print(Average_S,'psu')
Average_S_mask = ds1.S.where(ds1.hFacC>0).mean().values
print('Average of Salinity without continents')
print(Average_S_mask,'psu')
|
YtoTech/latex-on-http
|
latexonhttp/resources/fetching.py
|
Python
|
agpl-3.0
| 4,503
| 0.000666
|
# -*- coding: utf-8 -*-
"""
latexonhttp.resources.fetching
~~~~~~~~~~~~~~~~~~~~~
Fetchers for resources.
:copyright: (c) 2019 Yoan Tournade.
:license: AGPL, see LICENSE for more details.
"""
import base64
import requests
import logging
logger = logging.getLogger(__name__)
# ; # TODO Extract the filesystem management in a module:
# ; # - determine of fs/files actions to get to construct the filesystem;
# ; # - support content/string, base64/file, url/file, url/git, url/tar, post-data/tar
# ; # - hash and make a (deterministic) signature of files uploaded;
# ; # - from the list of actions, prepare the file system (giving only a root directory);
# ; # (- add a cache management on the file system preparation subpart).
def fetcher_utf8_string(resource, _get_from_cache):
# TODO is encode useful? Why do we get a str here instead of unicode?
return resource["body_source"]["raw_string"].encode("utf-8"), None
def fetcher_base64_file(resource, _get_from_cache):
return base64.b64decode(resource["body_source"]["raw_base64"]), None
# TODO Make it configurable.
# (So we can - around other things - reduce the delay in test configuration)
HTTP_REQUEST_TIMEOUT = 10
def fetcher_url_file(resource, _get_from_cache):
url = resource["body_source"]["url"]
logger.info("Fetching file from %s", url)
try:
response = requests.get(url, timeout=HTTP_REQUEST_TIMEOUT)
logger.info(
"Fetch response %s of content length %d",
response.status_code,
len(response.content),
)
if response.status_code >= 300:
return (
None,
{
"error": "RESOURCE_FETCH_FAILURE",
"fetch_error": {
"type": "http_error",
"http_code": response.status_code,
"http_response_content": response.text,
},
"resource": resource,
},
)
return response.content, None
except requests.exceptions.Timeout as te:
return (
None,
{
"error": "RESOURCE_FETCH_FAILURE",
"fetch_error": {
"type": "request_timeout",
"exception_content": str(te),
"http_code": None,
"http_response_content": None,
},
"resource": resource,
},
)
except requests.exceptions.ConnectionError as cee:
return (
None,
{
"error": "RESOURCE_FETCH_FAILURE",
"fetch_error": {
"type": "connection_error",
"exception_content": str(cee),
"http_code": None,
"http_response_content": None,
},
"resource": resource,
},
)
def fetcher_hash_cache(resource, get_from_cache):
if not get_from_cache:
return None, "NO_CACHE_PROVIDER_ENABLED"
logger.debug("Trying to fetch from cache %s", resource)
cached_data = get_from_cache(resource)
if not cached_data:
return None, "CACHE_MISS"
return cached_data, None
FETCHERS = {
"utf8/string": fetcher_utf8_string,
"base64/file": fetcher_base64_file,
"url/file": fetcher_url_file,
"hash/cache": fetcher_hash_cache,
# TODO "url/git", "url/tar"
# TODO Support a base64/gz/file, for compressed file upload?
}
def fetch_resources(resources, on_fetched, get_from_cache=None):
"""
on_fetched(resource, data)
get_from_cache(resource)
"""
# TODO Fetch cache? (URL, git, etc.)
# Managed by each fetcher, with options provided in input.
# (No fetch cache by default)
# TODO Passing options to fetcher:
# (for eg. retries and follow_redirects for URL, encoding, etc.)
# - default option dict;
# - override by input.
for resource in resources:
resource_fetcher = FETCHERS.get(resource["type"])
if not resource_fetcher:
return {"error": "FETCH_METHOD_NOT_SUPPORTED", "method": resource["type"]}
# Catch fetch error.
fetched_data, fetch_error = resource_fetcher(resource, get_from_cache)
if fetch_error:
return fetch_error
on_fetched_error = on_fetched(resource, fetched_data)
if on_fetched_error:
return on_fetched_error
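# --- A minimal usage sketch (editor's illustration, not part of the original
# module). The resource dict shape mirrors what fetcher_utf8_string expects
# above; the collector callback is hypothetical.
if __name__ == "__main__":
    fetched = {}

    def collect(resource, data):
        fetched[resource["type"]] = data
        return None  # returning a truthy value would abort fetch_resources

    error = fetch_resources(
        [{"type": "utf8/string", "body_source": {"raw_string": "hello"}}],
        collect,
    )
    assert error is None and fetched["utf8/string"] == b"hello"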
|
dcristoloveanu/qpid-proton
|
proton-c/env.py
|
Python
|
apache-2.0
| 2,444
| 0.003682
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# A platform-agnostic tool for running a program in a modified environment.
#
import sys
import os
import subprocess
from optparse import OptionParser
def main(argv=None):
parser = OptionParser(usage="Usage: %prog [options] [--] VAR=VALUE... command [options] arg1 arg2...")
parser.add_option("-i", "--ignore-environment",
action="store_true", default=False,
help="Start with an empty environment (do not inherit current environment)")
(options, args) = parser.parse_args(args=argv)
if options.ignore_environment:
new_env = {}
else:
new_env = os.environ.copy()
# pull out each name value pair
while (len(args)):
z = args[0].split("=",1)
if len(z) != 2:
break; # done with env args
if len(z[0]) == 0:
raise Exception("Error: incorrect format for env var: '%s'" % str(args[x]))
del args[0]
if len(z[1]) == 0:
# value is not present, so delete it
if z[0] in new_env:
del new_env[z[0]]
else:
new_env[z[0]] = z[1]
if len(args) == 0 or len(args[0]) == 0:
raise Exception("Error: syntax error in command arguments")
if new_env.get("VALGRIND") and new_env.get("VALGRIND_ALL"):
# Python generates a lot of possibly-lost errors that are not errors, don't show them.
args = [new_env.get("VALGRIND"), "--show-reachable=no", "--show-possibly-lost=no",
"--error-exitcode=42"] + args
p = subprocess.Popen(args, env=new_env)
return p.wait()
if __name__ == "__main__":
sys.exit(main())
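# Example invocation (editor's illustration; the variable names and command
# are hypothetical):
#
#   python env.py -i PATH=/usr/bin LANG=C /usr/bin/env
#
# runs /usr/bin/env in an environment containing only PATH and LANG; an
# empty value (e.g. LANG=) removes the variable instead of setting it.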
|
EdDev/vdsm
|
tests/vmapi_test.py
|
Python
|
gpl-2.0
| 3,422
| 0
|
#
# Copyright 2014-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from vdsm.virt import vmexitreason
from virt import vm
from vdsm.common import cache
from vdsm.common import define
from testlib import VdsmTestCase as TestCaseBase
from vdsm.api import vdsmapi
import API
from clientIF import clientIF
from testValidation import brokentest
from monkeypatch import MonkeyPatch, MonkeyPatchScope
import vmfakelib as fake
class TestSchemaCompliancyBase(TestCaseBase):
@cache.memoized
def _getAPI(self):
paths = [vdsmapi.find_schema()]
return vdsmapi.Schema(paths, True)
def assertVmStatsSchemaCompliancy(self, schema, stats):
api = self._getAPI()
ref = api.get_type(schema)
for prop in ref.get('properties'):
name = prop.get('name')
if 'defaultvalue' in prop:
# optional, may be absent and it is fine
if name in stats:
self.assertNotEqual(stats[name], None)
else:
# mandatory
self.assertIn(name, stats)
# TODO: type checking
_VM_PARAMS = {
'displayPort': -1, 'displaySecurePort': -1, 'display': 'qxl',
'displayIp': '127.0.0.1', 'vmType': 'kvm', 'devices': {},
'memSize': 1024,
# HACKs
'pauseCode': 'NOERR'}
class TestVmStats(TestSchemaCompliancyBase):
@MonkeyPatch(vm.Vm, 'send_status_event', lambda x: None)
def testDownStats(self):
with fake.VM() as testvm:
testvm.setDownStatus(define.ERROR, vmexitreason.GENERIC_ERROR)
self.assertVmStatsSchemaCompliancy('ExitedVmStats',
testvm.getStats())
@brokentest('Racy test, see http://gerrit.ovirt.org/37275')
def testRunningStats(self):
with fake.VM(_VM_PARAMS) as testvm:
self.assertVmStatsSchemaCompliancy('RunningVmStats',
testvm.getStats())
class TestApiAllVm(TestSchemaCompliancyBase):
@brokentest('Racy test, see http://gerrit.ovirt.org/36894')
def testAllVmStats(self):
with fake.VM(_VM_PARAMS) as testvm:
with MonkeyPatchScope([(clientIF, 'getInstance',
lambda _: testvm.cif)]):
api = API.Global()
# here is where clientIF will be used.
response = api.getAllVmStats()
self.assertEqual(response['status']['code'], 0)
for stat in response['statsList']:
self.assertVmStatsSchemaCompliancy(
'RunningVmStats', stat)
|
scrapinghub/exporters
|
tests/test_persistence.py
|
Python
|
bsd-3-clause
| 3,967
| 0.002017
|
import unittest
from mock import patch
from exporters.exporter_config import ExporterConfig
from exporters.persistence.base_persistence import BasePersistence
from exporters.persistence.pickle_persistence import PicklePersistence
from exporters.utils import remove_if_exists
from .utils import valid_config_with_updates, meta
class BasePersistenceTest(unittest.TestCase):
def setUp(self):
self.config = valid_config_with_updates({
'exporter_options': {
'log_level': 'DEBUG',
'logger_name': 'export-pipeline',
'resume': False,
}
})
def test_get_last_position(self):
exporter_config = ExporterConfig(self.config)
with self.assertRaises(NotImplementedError):
persistence = BasePersistence(exporter_config.persistence_options, meta())
persistence.get_last_position()
def test_commit_position(self):
exporter_config = ExporterConfig(self.config)
with self.assertRaises(NotImplementedError):
persistence = BasePersistence(exporter_config.persistence_options, meta())
persistence.commit_position(1)
def test_generate_new_job(self):
exporter_config = ExporterConfig(self.config)
with self.assertRaises(NotImplementedError):
persistence = BasePersistence(exporter_config.persistence_options, meta())
persistence.generate_new_job()
def test_delete_instance(self):
exporter_config = ExporterConfig(self.config)
with self.assertRaises(NotImplementedError):
persistence = BasePersistence(exporter_config.persistence_options, meta())
persistence.close()
class PicklePersistenceTest(unittest.TestCase):
def setUp(self):
self.config = valid_config_with_updates({
'exporter_options': {
'log_level': 'DEBUG',
'logger_name': 'export-pipeline',
'resume': False,
},
'persistence': {
'name': 'exporters.persistence.pickle_persistence.PicklePersistence',
'options': {'file_path': '/tmp'}
}
})
@patch('pickle.dump')
@patch('uuid.uuid4')
def test_create_persistence_job(self, mock_uuid, mock_pickle):
file_name = '1'
mock_pickle.dump.return_value = True
mock_uuid.return_value = file_name
exporter_config = ExporterConfig(self.config)
try:
persistence = PicklePersistence(
exporter_config.persistence_options, meta())
self.assertIsInstance(persistence, PicklePersistence)
persistence.close()
finally:
remove_if_exists('/tmp/'+file_name)
@patch('os.path.isfile', autospec=True)
@patch('__builtin__.open', autospec=True)
@patch('pickle.dump', autospec=True)
@patch('pickle.load', autospec=True)
def test_get_last_position(self, mock_load_pickle, mock_dump_pickle, mock_open, mock_is_file):
mock_dump_pickle.return_value = True
mock_is_file.return_value = True
mock_load_pickle.return_value = {'last_position': {'last_key': 10}}
exporter_config = ExporterConfig(self.config)
persistence = PicklePersistence(exporter_config.persistence_options, meta())
self.assertEqual({'last_key': 10}, persistence.get_last_position())
@patch('__builtin__.open', autospec=True)
@patch('pickle.dump', autospec=True)
@patch('uuid.uuid4', autospec=True)
def test_commit(self, mock_uuid, mock_dump_pickle, mock_open):
mock_dump_pickle.return_value = True
mock_uuid.return_value = 1
exporter_config = ExporterConfig(self.config)
persistence = PicklePersistence(exporter_config.persistence_options, meta())
self.assertEqual(None, persistence.commit_position(10))
self.assertEqual(persistence.get_metadata('commited_positions'), 1)
|
AlSayedGamal/python_zklib
|
zklib/zkRegevent.py
|
Python
|
gpl-2.0
| 2,460
| 0.01748
|
from struct import pack, unpack
from datetime import datetime, date
import sys
from zkconst import *
def reverseHex(hexstr):
tmp = ''
for i in reversed( xrange( len(hexstr)/2 ) ):
tmp += hexstr[i*2:(i*2)+2]
return tmp
def zkRegevent(self):
"""register for live events"""
print "reg event"
command = CMD_REG_EVENT
command_string = ''
chksum = 0
session_id = 0
reply_id = unpack('HHHH', self.data_recv[:8])[3]
buf = self.createHeader(command, chksum, session_id,reply_id, command_string)
self.zkclient.sendto(buf, self.address)
#print buf.encode("hex")
self.data_recv, addr = self.zkclient.recvfrom(1024)
self.session_id = unpack('HHHH', self.data_recv[:8])[2]
print "size", sys.getsizeof(data_recv)
print "size", len(data_recv)
lensi = len(data_recv) / 2
fstri = str(lensi) + "H"
print "first unpack ", unpack (fstri, data_recv)
if unpack('4H',data_recv[:8])[0] == CMD_PREPARE_DATA:
print "received CMD_PREPARE_DATA"
size = unpack('I', data_recv[8:12])[0]
if unpack('4H', data_recv[:8])[0] == CMD_ACK_OK:
print "CMD_ACK_OK from regevent"
print 'Receiving %s %s' % (size,"bytes")
#data_recv, addr = self.zkclient.recvfrom(43773)
#lens = len(self.data_recv) / 2
#fstr = str(lens) + "H"
#print "second unpack", unpack(fstr, self.data_recv)
i = 0
while True: #unpack('4H', data_recv[:8])[0] != CMD_ACK_OK or unpack('4H', data_recv[:8])[0] == CMD_DATA:
print "COUNTER", i
data_recv, addr = self.zkclient.recvfrom(size)
lens = len(data_recv[:8]) / 2
fstr = str(lens) + "H"
if unpack(fstr, data_recv[:8])[0] == CMD_DATA:
i = i +1
print "data package " , unpack(fstr, data_recv[:8])[0]
lens = len(data_recv) / 2
fstr = str(lens) + "H"
print "data unpack", unpack(fstr, data_recv)
if i == 1:
self.attendancedata.append(data_recv)
elif i == 2:
#atti.append(data_recv)
self.attendancedata.append(data_recv)
if unpack('4H', data_recv[:8])[0] == CMD_ACK_OK:
print "CMD_ACK_OK"
#acmOK(self)
if unpack('4H', data_recv[:8])[0] == CMD_ACK_OK:
print "CMD_ACK_OK"
|
carver/ens.py
|
tests/test_setup_name.py
|
Python
|
mit
| 3,383
| 0.001478
|
import pytest
from unittest.mock import Mock
from ens.main import UnauthorizedError, AddressMismatch, UnownedName
'''
API at: https://github.com/carver/ens.py/issues/2
'''
@pytest.fixture
def ens2(ens, mocker, addr1, addr9, hash9):
mocker.patch.object(ens, '_setup_reverse')
mocker.patch.object(ens, 'address', return_value=None)
mocker.patch.object(ens, 'owner', return_value=None)
mocker.patch.object(ens.web3, 'eth', wraps=ens.web3.eth, accounts=[addr1, addr9])
mocker.patch.object(ens, 'setup_address')
'''
mocker.patch.object(ens, '_resolverContract', return_value=Mock())
mocker.patch.object(ens, '_first_owner', wraps=ens._first_owner)
mocker.patch.object(ens, '_claim_ownership', wraps=ens._claim_ownership)
mocker.patch.object(ens, '_set_resolver', wraps=ens._set_resolver)
mocker.patch.object(ens.ens, 'resolver', return_value=None)
mocker.patch.object(ens.ens, 'setAddr', return_value=hash9)
mocker.patch.object(ens.ens, 'setResolver')
mocker.patch.object(ens.ens, 'setSubnodeOwner')
'''
return ens
def test_cannot_set_name_on_mismatch_address(ens2, mocker, name1, addr1, addr2):
mocker.patch.object(ens2, 'address', return_value=addr2)
with pytest.raises(AddressMismatch):
ens2.setup_name(name1, addr1)
def test_setup_name_default_address(ens2, mocker, name1, addr1):
mocker.patch.object(ens2, 'address', return_value=addr1)
ens2.setup_name(name1)
ens2._setup_reverse.assert_called_once_with(name1, addr1, transact={})
def test_setup_name_default_to_owner(ens2, mocker, name1, addr1):
mocker.patch.object(ens2, 'owner', return_value=addr1)
ens2.setup_name(name1)
ens2._setup_reverse.assert_called_once_with(name1, addr1, transact={})
def test_setup_name_unowned_exception(ens2, name1):
with pytest.raises(UnownedName):
ens2.setup_name(name1)
def test_setup_name_unauthorized(ens2, mocker, name1, addr1):
mocker.patch.object(ens2, 'address', return_value=addr1)
mocker.patch.object(ens2.web3, 'eth', wraps=ens2.web3.eth, accounts=[])
with pytest.raises(UnauthorizedError):
ens2.setup_name(name1, addr1)
def test_setup_name_no_resolution(ens2, name1, addr1):
ens2.setup_name(name1, addr1)
ens2._setup_reverse.assert_called_once_with(name1, addr1, transact={})
def test_setup_name_transact_passthrough(ens2, name1, addr1):
transact = {'gasPrice': 1}
ens2.setup_name(name1, addr1, transact=transact)
ens2._setup_reverse.assert_called_once_with(name1, addr1, transact=transact)
def test_setup_name_resolver_setup(ens2, name1, addr1):
# if the name doesn't currently resolve to anything, set it up
transact = {'gasPrice': 1}
ens2.setup_name(name1, addr1, transact=transact)
ens2.setup_address.assert_called_once_with(name1, addr1, transact=transact)
def test_setup_reverse_label_to_fullname(ens, mocker, addr1):
registrar = mocker.patch.object(ens, '_reverse_registrar', return_value=Mock())
ens._setup_reverse('castleanthrax', addr1)
registrar().setName.assert_called_once_with('castleanthrax.eth', transact={'from': addr1})
def test_setup_reverse_dict_unmodified(ens, mocker, addr1):
mocker.patch.object(ens, '_reverse_registrar', return_value=Mock())
transact = {}
ens._setup_reverse('castleanthrax', addr1, transact=transact)
assert transact == {}
|
GenericMappingTools/gmt-python
|
examples/gallery/images/track_sampling.py
|
Python
|
bsd-3-clause
| 1,614
| 0.005576
|
"""
Sampling along tracks
---------------------
The :func:`pygmt.grdtrack` function samples a raster grid's value along specified
points. We will need to input a 2D raster to ``grid`` which can be an
:class:`xarray.DataArray`. The argument passed to the ``points`` parameter can be a
:class:`pandas.DataFrame` table where the first two columns are x and y (or longitude
and latitude). Note also that there is a ``newcolname`` parameter that will be used to
name the new column of values sampled from the grid.
Alternatively, a NetCDF file path can be passed to ``grid``. An ASCII file path can
also be accepted for ``points``. To save an output ASCII file, a file name argument
needs to be passed to the ``outfile`` parameter.
"""
import pygmt
# Load sample grid and point datasets
grid = pygmt.datasets.load_earth_relief()
points = pygmt.datasets.load_ocean_ridge_points()
# Sample the bathymetry along the world's ocean ridges at specified track points
track = pygmt.grdtrack(points=points, grid=grid, newcolname="bathymetry")
fig = pygmt.Figure()
# Plot the earth relief grid on Cylindrical Stereographic projection, masking land areas
fig.basemap(region="g", projection="Cyl_stere/150/-20/15c", frame=True)
fig.grdimage(grid=grid, cmap="gray")
fig.coast(land="#666666")
# Plot the sampled bathymetry points using circles (c) of 0.15 cm size
# Points are colored using elevation values (normalized for visual purposes)
fig.plot(
x=track.longitude,
y=track.latitude,
style="c0.15c",
cmap="terra",
color=(track.bathymetry - track.bathymetry.mean()) / track.bathymetry.std(),
)
fig.show()
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/Python/lib/site-packages/PIL/_binary.py
|
Python
|
gpl-3.0
| 1,855
| 0.001078
|
#
# The Python Imaging Library.
# $Id$
#
# Binary input/output support routines.
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1995-2003 by Fredrik Lundh
# Copyright (c) 2012 by Brian Crowell
#
# See the README file for information on usage and redistribution.
#
from struct import unpack, pack
if bytes is str:
def i8(c):
return ord(c)
def o8(i):
return chr(i & 255)
else:
def i8(c):
return c if c.__class__ is int else c[0]
def o8(i):
return bytes((i & 255,))
# Input, le = little endian, be = big endian
# TODO: replace with more readable struct.unpack equivalent
def i16le(c, o=0):
"""
Converts a 2-bytes (16 bits) string to an unsigned integer.
c: string containing bytes to convert
o: offset of bytes to convert in string
"""
return unpack("<H", c[o
|
:o+2])[0]
def si16le(c, o=0):
"""
Converts a 2-bytes (16 bits) string to a signed integer.
c: string containing bytes to convert
o: offset of bytes to convert in string
"""
return unpack("<h", c[o:o+2])[0]
def i32le(c, o=0):
"""
Converts a 4-bytes (32 bits) string to an unsigned integer.
c: string containing bytes to convert
o: offset of bytes to convert in string
"""
return unpack("<I", c[o:o+4])[0]
def si32le(c, o=0):
"""
Converts a 4-bytes (32 bits) string to a signed integer.
c: string containing bytes to convert
o: offset of bytes to convert in string
"""
return unpack("<i", c[o:o+4])[0]
def i16be(c, o=0):
return unpack(">H", c[o:o+2])[0]
def i32be(c, o=0):
return unpack(">I", c[o:o+4])[0]
# Output, le = little endian, be = big endian
def o16le(i):
return pack("<H", i)
def o32le(i):
return pack("<I", i)
def o16be(i):
return pack(">H", i)
def o32be(i):
return pack(">I", i)
|
michaelnetbiz/mistt-solution
|
app/models/cases.py
|
Python
|
mit
| 114
| 0
|
from app import db, GenericRecord
class Case(GenericRecord):
__collection__ = 'cases'
db.register([Case])
|
elebihan/grissom
|
grissom/__init__.py
|
Python
|
gpl-3.0
| 909
| 0
|
# -*- coding: utf-8 -*-
#
# grissom - FOSS compliance tools
#
# Copyright (c) Eric Le Bihan <eric.le.bihan.dev@free.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
FOSS compliance tools
"""
__version__ = '0.1.1'
from . import binfmt
from . import formatters
from . import legal
# vim: ts=4 sts=4 sw=4 et ai
|
ShengRang/c4f
|
leetcode/combination-sum-ii.py
|
Python
|
gpl-3.0
| 1,342
| 0.007452
|
# coding: utf-8
class Solution(object):
@staticmethod
def dfs(candidates, target, vis, res, cur_idx, sum):
if sum > target:
return
if sum == target:
ans = [candidates[i] for i in cur_idx if i >= 0]
res.append(ans)
return
if sum < target:
for i, v in enumerate(candidates):
if sum + v > target:
break
if i != cur_idx[-1] + 1 and candidates[i] == candidates[i-1]:
continue
if i >= cur_idx[-1] and (not vis[i]):
vis[i] = 1
cur_idx.append(i)
Solution.dfs(candidates, target, vis, res, cur_idx, sum+v)
vis[i] = 0
cur_idx.pop()
def combinationSum2(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
candidates = sorted(candidates)
n = len(candidates)
res = []
cur_idx = [-1]
vis = [0 for _ in candidates]
Solution.dfs(candidates, target, vis, res, cur_idx, 0)
# return map(list, list(res))
return res
s = Solution()
print s.combinationSum2([10,1,2,7,6,1,5], 8)
print s.combinationSum2([2,5,2,1,2], 5)
|
tmfoltz/worldengine
|
tests/drawing_functions_test.py
|
Python
|
mit
| 1,681
| 0.004164
|
import unittest
from worldengine.drawing_functions import draw_ancientmap, gradient, draw_rivers_on_image
from worldengine.world import World
from tests.draw_test import TestBase, PixelCollector
class TestDrawingFunctions(TestBase):
def setUp(self):
super(TestDrawingFunctions, self).setUp()
self.w = World.open_protobuf("%s/seed_28070.world" % self.tests_data_dir)
def test_draw_ancient_map_factor1(self):
w_large = World.from_pickle_file("%s/seed_48956.world" % self.tests_data_dir)
target = PixelCollector(w_large.width, w_large.height)
draw_ancientmap(w_large, target, resize_factor=1)
self._assert_img_equal("ancientmap_48956", target)
def test_draw_ancient_map_factor3(self):
target = PixelCollector(self.w.width * 3, self.w.height * 3)
draw_ancientmap(self.w, target, resize_factor=3)
self._assert_img_equal("ancientmap_28070_factor3", target)
def test_gradient(self):
self._assert_are_colors_equal((10, 20, 40),
gradient(0.0, 0.0, 1.0, (10, 20, 40), (0, 128, 240)))
self._assert_are_colors_equal((0, 128, 240),
gradient(1.0, 0.0, 1.0, (10, 20, 40), (0, 128, 240)))
self._assert_are_colors_equal((5, 74, 140),
gradient(0.5, 0.0, 1.0, (10, 20, 40), (0, 128, 240)))
def test_draw_rivers_on_image(self):
target = PixelCollector(self.w.width * 2, self.w.height * 2)
draw_rivers_on_image(self.w, target, factor=2)
self._assert_img_equal("rivers_28070_factor2", target)
if __name__ == '__main__':
unittest.main()
|
jptomo/rpython-lang-scheme
|
rpython/jit/backend/x86/vector_ext.py
|
Python
|
mit
| 29,220
| 0.001711
|
import py
from rpython.jit.metainterp.compile import ResumeGuardDescr
from rpython.jit.metainterp.history import (ConstInt, INT, REF,
FLOAT, VECTOR, TargetToken)
from rpython.jit.backend.llsupport.descr import (ArrayDescr, CallDescr,
unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr)
from rpython.jit.backend.x86.regloc import (FrameLoc, RegLoc, ConstFloatLoc,
FloatImmedLoc, ImmedLoc, imm, imm0, imm1, ecx, eax, edx, ebx, esi, edi,
ebp, r8, r9, r10, r11, r12, r13, r14, r15, xmm0, xmm1, xmm2, xmm3, xmm4,
xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14,
X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, AddressLoc)
from rpython.jit.backend.llsupport.regalloc import (get_scale, valid_addressing_size)
from rpython.jit.metainterp.resoperation import rop, ResOperation
from rpython.rlib.objectmodel import we_are_translated
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.lltypesystem import lltype
from rpython.jit.backend.x86 import rx86
# duplicated for easy migration, def in assembler.py as well
# DUP START
def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0):
return AddressLoc(reg_or_imm1, reg_or_imm2, scale, offset)
def heap(addr):
return AddressLoc(ImmedLoc(addr), imm0, 0, 0)
def not_implemented(msg):
msg = '[x86/vector_ext] %s\n' % msg
if we_are_translated():
llop.debug_print(lltype.Void, msg)
raise NotImplementedError(msg)
# DUP END
class VectorAssemblerMixin(object):
_mixin_ = True
def guard_vector(self, guard_op, loc, true):
arg = guard_op.getarg(0)
size = arg.bytesize
temp = X86_64_XMM_SCRATCH_REG
load = arg.bytesize * arg.count - self.cpu.vector_register_size
assert load <= 0
if true:
self.mc.PXOR(temp, temp)
# if the vector is not fully packed blend 1s
if load < 0:
self.mc.PCMPEQQ(temp, temp) # fill with ones
self._blend_unused_slots(loc, arg, temp)
# reset to zeros
self.mc.PXOR(temp, temp)
# cmp with zeros (in temp) creates ones at each slot where it is zero
self.mc.PCMPEQ(loc, temp, size)
# temp converted to ones
self.mc.PCMPEQQ(temp, temp)
# test if all slots are zero
self.mc.PTEST(loc, temp)
self.guard_success_cc = rx86.Conditions['Z']
else:
# if the vector is not fully packed blend 1s
if load < 0:
temp = X86_64_XMM_SCRATCH_REG
self.mc.PXOR(temp, temp)
self._blend_unused_slots(loc, arg, temp)
self.mc.PTEST(loc, loc)
self.guard_success_cc = rx86.Conditions['NZ']
def _blend_unused_slots(self, loc, arg, temp):
select = 0
bits_used = (arg.count * arg.bytesize * 8)
index = bits_used // 16
while index < 8:
select |= (1 << index)
index += 1
self.mc.PBLENDW_xxi(loc.value, temp.value, select)
def _update_at_exit(self, fail_locs, fail_args, faildescr, regalloc):
""" If accumulation is done in this loop, at the guard exit
some vector registers must be adjusted to yield the correct value
"""
if not isinstance(faildescr, ResumeGuardDescr):
return
assert regalloc is not None
accum_info = faildescr.rd_vector_info
while accum_info:
pos = accum_info.getpos_in_failargs()
scalar_loc = fail_locs[pos]
vector_loc = accum_info.location
# the upper elements will be lost if saved to the stack!
scalar_arg = accum_info.getoriginal()
assert isinstance(vector_loc, RegLoc)
if not isinstance(scalar_loc, RegLoc):
scalar_loc = regalloc.force_allocate_reg(scalar_arg)
assert scalar_arg is not None
if accum_info.accum_operation == '+':
self._accum_reduce_sum(scalar_arg, vector_loc, scalar_loc)
elif accum_info.accum_operation == '*':
self._accum_reduce_mul(scalar_arg, vector_loc, scalar_loc)
else:
not_implemented("accum operator %s not implemented" %
(accum_info.accum_operation))
accum_info = accum_info.next()
def _accum_reduce_mul(self, arg, accumloc, targetloc):
scratchloc = X86_64_XMM_SCRATCH_REG
self.mov(accumloc, scratchloc)
# swap the two elements
self.mc.SHUFPD_xxi(scratchloc.value, scratchloc.value, 0x01)
self.mc.MULSD(accumloc, scratchloc)
if accumloc is not targetloc:
self.mov(accumloc, targetloc)
def _accum_reduce_sum(self, arg, accumloc, targetloc):
# Currently the accumulator can ONLY be the biggest
# size for X86 -> 64 bit float/int
if arg.type == FLOAT:
# r = (r[0]+r[1],r[0]+r[1])
self.mc.HADDPD(accumloc, accumloc)
# upper bits (> 64) are dirty (but does not matter)
if accumloc is not targetloc:
self.mov(accumloc, targetloc)
return
elif arg.type == INT:
scratchloc = X86_64_SCRATCH_REG
self.mc.PEXTRQ_rxi(targetloc.value, accumloc.value, 0)
self.mc.PEXTRQ_rxi(scratchloc.value, accumloc.value, 1)
self.mc.ADD(targetloc, scratchloc)
return
not_implemented("reduce sum fo
|
r %s not impl." % arg)
def _genop_vec_getarrayitem(self, op, arglocs, resloc):
# considers item scale (raw_load does not)
base_loc, ofs_loc, size_loc, ofs, integer_loc, aligned_loc = arglocs
scale = get_scale(size_loc.value)
src_addr = addr_add(base_loc, ofs_loc, ofs.value, scale)
self._vec_load(resloc, src_addr, integer_loc.value,
size_loc.value, aligned_loc.value)
genop_vec_getarrayitem_raw_i = _genop_vec_getarrayitem
genop_vec_getarrayitem_raw_f = _genop_vec_getarrayitem
genop_vec_getarrayitem_gc_i = _genop_vec_getarrayitem
genop_vec_getarrayitem_gc_f = _genop_vec_getarrayitem
def _genop_vec_raw_load(self, op, arglocs, resloc):
base_loc, ofs_loc, size_loc, ofs, integer_loc, aligned_loc = arglocs
src_addr = addr_add(base_loc, ofs_loc, ofs.value, 0)
self._vec_load(resloc, src_addr, integer_loc.value,
size_loc.value, aligned_loc.value)
genop_vec_raw_load_i = _genop_vec_raw_load
genop_vec_raw_load_f = _genop_vec_raw_load
def _vec_load(self, resloc, src_addr, integer, itemsize, aligned):
if integer:
if aligned:
self.mc.MOVDQA(resloc, src_addr)
else:
self.mc.MOVDQU(resloc, src_addr)
else:
if itemsize == 4:
self.mc.MOVUPS(resloc, src_addr)
elif itemsize == 8:
self.mc.MOVUPD(resloc, src_addr)
def _genop_discard_vec_setarrayitem(self, op, arglocs):
# considers item scale (raw_store does not)
base_loc, ofs_loc, value_loc, size_loc, baseofs, integer_loc, aligned_loc = arglocs
scale = get_scale(size_loc.value)
dest_loc = addr_add(base_loc, ofs_loc, baseofs.value, scale)
self._vec_store(dest_loc, value_loc, integer_loc.value,
size_loc.value, aligned_loc.value)
genop_discard_vec_setarrayitem_raw = _genop_discard_vec_setarrayitem
genop_discard_vec_setarrayitem_gc = _genop_discard_vec_setarrayitem
def genop_discard_vec_raw_store(self, op, arglocs):
base_loc, ofs_loc, value_loc, size_loc, baseofs, integer_loc, aligned_loc = arglocs
dest_loc = addr_add(base_loc, ofs_loc, baseofs.value, 0)
self._vec_store(dest_loc, value_loc, integer_loc.value,
size_loc.value, aligned_loc.value)
def _vec_store(self, dest_loc, value_loc, integer, itemsize, aligned):
if integer:
if aligned:
self.mc.MOVDQA(dest_loc, value_loc)
else:
                self.mc.MOVDQU(dest_loc, value_loc)
|
tdely/freischutz
|
tools/freischutz-client.py
|
Python
|
bsd-3-clause
| 6,393
| 0.001252
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
CLI client for interacting with Freischutz RESTful APIs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:see: https://gitlab.com/tdely/freischutz/ Freischutz on GitLab
:author: Tobias Dély (tdely) <cleverhatcamouflage@gmail.com>
:copyright: (c) 2018-present Tobias Dély.
:licence: https://directory.fsf.org/wiki/License:BSD-3-Clause BSD-3-Clause
"""
import argparse
import re
import sys
import time
import base64
import hashlib
import hmac
import random
import string
import requests
def hawk_build(uid, key, url, method, ctype, data='', alg='', ext='', verbose=False):
"""
Build Hawk authentication header
:param uid: Hawk client ID
:param key: Hawk client key
:param url: HTTP request URL
:param method: HTTP request method
:param ctype: HTTP request content type
:param data: HTTP request data
:param alg: Hawk hash algorithm
:param ext: Hawk ext
:param verbose:
:returns: Header content
"""
try:
crypto = getattr(hashlib, alg)
except AttributeError as e:
print("Unsupported hash algorithm '{}', available: '{}'.".format(
alg,
"', '".join(hashlib.algorithms_available)
))
sys.exit(1)
matches = re.match(r'^(http|https)://([^:/]+)(:([0-9]+))?(/(.+)?)?', url)
(protocol, host, x, port, uri, _) = matches.groups()
if port is None:
if protocol == "https":
port = 443
elif protocol == "http":
port = 80
else:
print('Unknown protocol specified: {}'.format(protocol))
sys.exit(1)
ts = int(time.time())
payload = "hawk.1.payload\n
|
"
payload += "{}\n"
payload += "{}\n"
payload = payload.format(ctype, data)
payload_hash = base64.b64encode(crypto(payload).digest())
nonce = ''.join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(6)
)
    if ext:
        ext = "alg={};{}".format(alg, ext)
else:
ext = "alg={}".format(alg)
msg = "hawk.1.header\n"
msg += "{}\n"
msg += "{}\n"
msg += "{}\n"
msg += "{}\n"
msg += "{}\n"
msg += "{}\n"
msg += "{}\n"
msg += "{}\n"
msg = msg.format(ts, nonce, method, uri, host, port, payload_hash, ext)
mac = base64.b64encode(hmac.new(key, msg, crypto).digest())
if verbose:
print("-------------------------------------------\n"
"{}"
"-------------------------------------------\n"
"{}"
"-------------------------------------------\n"
"MAC:\n{}\n".format(payload, msg, mac))
header = 'Hawk id="{}", ts="{}", nonce="{}", mac="{}", hash="{}", ext="{}"'.format(
uid, ts, nonce, mac, payload_hash, ext
)
return header
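# Hedged usage sketch (id, key and URL are made-up values):
#
#     header = hawk_build('client-1', 'secret-key',
#                         'http://localhost:8000/users', 'GET',
#                         'text/plain', alg='sha256')
#     # -> 'Hawk id="client-1", ts="...", nonce="...", mac="...", ...'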
def basic_auth_build(uid, key):
"""
Build basic authentication header.
:param uid: Username
:param key: Password
:returns: Header content
"""
return 'Basic {}'.format(base64.b64encode('{}:{}'.format(uid, key)))
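# Worked example: basic_auth_build('user', 'pass') -> 'Basic dXNlcjpwYXNz'
# (the standard base64 of "user:pass", per RFC 7617)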
def main():
"""Main"""
argparser = argparse.ArgumentParser(
description=''
)
argparser.add_argument('-a', '--algorithm', metavar='STR', dest='alg',
action='store', type=str, default='sha256',
help='hash algorithm to use for Hawk, default sha256')
argparser.add_argument('-B', '--basic', dest='basic', action='store_true',
help='use basic authentication')
argparser.add_argument('-H', '--hawk', dest='hawk', action='store_true',
help='use Hawk authentication')
argparser.add_argument('-T', '--bearer', metavar='STR', dest='bearer',
action='store', type=str,
help='use bearer token authentication')
argparser.add_argument('-i', '--id', metavar='STR', dest='id',
action='store', type=str, help='authentication ID')
argparser.add_argument('-k', '--key', metavar='STR', dest='key',
action='store', type=str, help='authentication key')
argparser.add_argument('-c', '--content-type', metavar='STR', dest='type',
action='store', type=str, default='text/plain',
help='HTTP request content type, default text/plain')
argparser.add_argument('-d', '--data', metavar='STR', dest='data',
action='store', type=str, default='',
help='HTTP request data')
argparser.add_argument('-e', '--ext', metavar='STR', dest='ext',
action='store', type=str, default='',
help='optional ext value for Hawk')
argparser.add_argument('-m', '--method', metavar='STR', dest='method',
action='store', type=str, default='GET',
help='HTTP request method')
argparser.add_argument('-V', '--verbose', dest='verbose', action='store_true',
help='show HTTP info')
argparser.add_argument('url')
args = argparser.parse_args()
headers = {'Content-Type': args.type}
if args.hawk:
if not args.id or not args.key:
            print('Hawk requires --id and --key to be set')
sys.exit(1)
headers['Authorization'] = hawk_build(
args.id,
args.key,
args.url,
args.method,
args.type,
args.data,
args.alg,
args.ext,
args.verbose
)
if args.basic:
if not args.id or not args.key:
            print('Basic authentication requires --id and --key to be set')
sys.exit(1)
headers['Authorization'] = basic_auth_build(args.id, args.key)
if args.bearer:
headers['Authorization'] = args.bearer
matches = re.match(r'^(http|https)://([^:/]+)(:([0-9]+))?(/(.+)?)?', args.url)
(protocol, host, x, port, uri, x) = matches.groups()
del x
response = requests.request(args.method, args.url, data=args.data, headers=headers)
result = {
'status': response.status_code,
'type': response.headers['content-type'],
'content': response.content
}
print(result)
if __name__ == '__main__':
sys.exit(main())
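# Hedged invocation sketch (host, id and key are made up):
#
#     ./freischutz-client.py -H -i client-1 -k secret-key \
#         -m GET http://localhost:8000/users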
|
adamgreig/sheepdog
|
examples/numpy_example.py
|
Python
|
mit
| 531
| 0.00565
|
"""
Example Sheepdog script with numpy.
We'll import and make global numpy inside our Sheepdog function,
and then any other function can also use it.
"""
import sheepdog
def f(a, b):
import numpy as np
global np
return g(a, b)
def g(a, b):
return np.mean(np.array((a, b)))
args = [(1, 1), (1, 2), (2, 2)]
print("Running f(a,b) for arguments:")
print(args)
config = {
"host": "fear",
}
namespace = {"g": g}
results = sheepdog.map(f, args, config, namespace)
print("\nReceived results:")
print(results)
|
mstrisoline/ufb
|
run.py
|
Python
|
gpl-2.0
| 181
| 0.01105
|
#!/usr/bin/env python
#Run the modularized application
from ufb import app, db
if __name__ == "__main__":
app.debug = True
    db.create_all(app=app)
app.run(debug=True)
|
fifengine/fifengine
|
engine/python/fife/extensions/loaders.py
|
Python
|
lgpl-2.1
| 2,646
| 0.015495
|
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2019 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
""" Loaders plugin manager """
from __future__ import print_function
import os.path
from fife import fife
from fife.extensions.serializers.xmlmap import XMLMapLoader
mapFileMapping = { 'xml' : XMLMapLoader}
fileExtensions = set(['xml'])
def loadMapFile(path, engine, callback=None, debug=True, extensions={}):
""" load map file and get (an optional) callback if major stuff is done:
- map creation
- parsed imports
- parsed layers
- parsed cameras
	the callback will receive both a string and a float (which shows
	the overall progress): callback(string, float)
@type engine: object
@param engine: FIFE engine instance
@type callback: function
@param callback: callback for maploading progress
@type debug: bool
	@param debug: flag to activate / deactivate print statements
@rtype object
@return FIFE map object
"""
(filename, extension) = os.path.splitext(path)
map_loader = mapFileMapping[extension[1:]](engine, callback, debug, extensions)
map = map_loader.loadResource(path)
if debug: print("--- Loading map took: ", map_loader.time_to_load, " seconds.")
return map
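# Hedged usage sketch ('my_map.xml' and the callback are made up; the engine
# object comes from normal FIFE setup):
#
#     def _report(msg, progress):
#         print("%3.0f%% %s" % (progress * 100, msg))
#
#     my_map = loadMapFile('my_map.xml', engine, callback=_report)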
def addMapLoader(fileExtension, loaderClass):
"""Add a new loader for fileextension
@type fileExtension: string
	@param fileExtension: The file extension the loader is registered for
@type loaderClass: object
@param loaderClass: A fife.ResourceLoader implementation that loads maps
from files with the given fileExtension
"""
mapFileMapping[fileExtension] = loaderClass
_updateMapFileExtensions()
def _updateMapFileExtensions():
global fileExtensions
fileExtensions = set(mapFileMapping.keys())
|
wispwisp/supportNotebook
|
responseTemplates/admin.py
|
Python
|
mit
| 325
| 0
|
from django.contrib import admin
from responseTemplates.models import ResponseTemplate, Paragraph
class ParagraphInline(admin.StackedInline):
model = Paragraph
extra = 1
class ResponseTemplateAdmin(admin.ModelAdmin):
inlines = [ParagraphInline]
admin.site.register(ResponseTemplate, ResponseTemplateAdmin)
|
Azure/azure-sdk-for-python
|
sdk/monitor/azure-monitor-query/azure/monitor/query/aio/__init__.py
|
Python
|
mit
| 492
| 0.002033
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------
from ._logs_query_client_async import LogsQueryClient
from ._metrics_query_client_async import MetricsQueryClient
__all__ = ["LogsQueryClient", "MetricsQueryClient"]
|
|
MahjongRepository/mahjong
|
mahjong/hand_calculating/yaku.py
|
Python
|
mit
| 1,121
| 0.001784
|
import warnings
class Yaku:
yaku_id = None
tenhou_id = None
name = None
han_open = None
han_closed = None
is_yakuman = None
def __init__(self, yaku_id=None):
self.tenhou_id = None
self.yaku_id = yaku_id
self.set_attributes()
def __str__(self):
return self.name
def __repr__(self):
# for calls in array
return self.__str__()
    def is_condition_met(self, hand, *args):
"""
        Does this yaku exist in the hand?
:param: hand
:param: args: some yaku requires additional attributes
:return: boolean
"""
raise NotImplementedError
def set_attributes(self):
"""
Set id, name, han related to the yaku
"""
        raise NotImplementedError
@property
def english(self):
warnings.warn("Use .name attribute instead of .english attribute", DeprecationWarning)
return self.name
@property
def japanese(self):
warnings.warn("Use .name attribute instead of .japanese attribute", DeprecationWarning)
return self.name
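# Hedged sketch of a concrete subclass (name and han values are made up and
# purely illustrative -- real yaku definitions live elsewhere in the library):
class _ExampleYaku(Yaku):
    def set_attributes(self):
        self.name = 'Example Yaku'
        self.han_open = 1
        self.han_closed = 2
        self.is_yakuman = False
    def is_condition_met(self, hand, *args):
        return True  # a real implementation would inspect the hand
# usage: _ExampleYaku().han_closed == 2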
|
ipa-led/airbus_coop
|
airbus_plugins/airbus_plugin_log_manager/setup.py
|
Python
|
apache-2.0
| 888
| 0.003378
|
#!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
packages=['airbus_plugin_log_manager'],
package_dir={'': 'src'},
)
setup(**d)
|
hwaf/hwaf
|
py-hwaftools/orch/waffuncs.py
|
Python
|
bsd-3-clause
| 4,382
| 0.006846
|
#!/usr/bin/env python
# encoding: utf-8
## stdlib imports
import os
from glob import glob
## 3rd party
from . import pkgconf
from . import envmunge
from . import features as featmod
from . import util
## waf imports
import waflib.Logs as msg
import waflib.Context as context
# NOT from the waf book. The waf book example for depends_on doesn't work
# from waflib import TaskGen
# @TaskGen.feature('*')
# @TaskGen.before_method('process_rule')
# def post_the_other(self):
# deps = getattr(self, 'depends_on', [])
# for name in self.to_list(deps):
# msg.debug('orch: DEPENDS_ON: %s %s' % ( self.name, name ))
# other = self.bld.get_tgen_by_name(name)
# other.post()
# for ot in other.tasks:
# msg.debug('orch: OTHER TASK: %s before: %s' % (ot, ot.before))
# ot.before.append(self.name)
# waf entries
def options(opt):
opt.add_option('--orch-config', action = 'store', default = 'orch.cfg',
help='Give an orchestration configuration file.')
opt.add_option('--orch-start', action = 'store', default = 'start',
help='Set the section to start the orchestration')
def bind_functions(ctx):
from pprint import PrettyPrinter
pp = PrettyPrinter(indent=2)
ctx.orch_dump = lambda : pp.pprint({'packages': ctx.env.orch_package_list,
'groups': ctx.env.orch_group_list})
def configure(cfg):
msg.debug('orch: CONFIG CALLED')
if not cfg.options.orch_config:
raise RuntimeError('No Orchestration configuration file given (--orch-config)')
orch_config = []
for lst in util.string2list(cfg.options.orch_config):
lst = lst.strip()
orch_config += glob(lst)
okay = True
for maybe in orch_config:
if os.path.exists(maybe):
continue
msg.error('No such file: %s' % maybe)
okay = False
if not okay or not orch_config:
raise ValueError('missing configuration files')
cfg.msg('Orch configuration files', '"%s"' % '", "'.join(orch_config))
extra = dict(cfg.env)
extra['top'] = cfg.path.abspath()
out = cfg.bldnode.abspath() # usually {top}/tmp
assert out, 'No out dir defined'
extra['out'] = out
extra['DESTDIR'] = getattr(cfg.options, 'destdir', '')
msg.debug('top="{top}" out="{out}" DESTDIR="{DESTDIR}"'.format(**extra))
suite = pkgconf.load(orch_config, start = cfg.options.orch_start, **extra)
envmunge.decompose(cfg, suite)
cfg.msg('Orch configure envs', '"%s"' % '", "'.join(cfg.all_envs.keys()))
bind_functions(cfg)
return
def build(bld):
msg.debug ('orch: BUILD CALLED')
bind_functions(bld)
import orch.features
feature_funcs, feature_configs = orch.features.load()
msg.info('Supported features: "%s"' % '", "'.join(sorted(feature_funcs.keys())))
msg.debug('orch: Build envs: %s' % ', '.join(sorted(bld.all_envs.keys())))
pfi_list = list()
to_recurse = []
for grpname in bld.env.orch_group_list:
msg.debug('orch: Adding group: "%s"' % grpname)
bld.add_group(grpname)
group = bld.env.orch_group_dict[grpname]
for package in group['packages']:
pkgname = package['package']
# delegate package to another wscript file?
other_wscript = os.path.join(bld.launch_dir, pkgname, 'wscript')
if os.path.exists(other_wscript):
                msg.info('orch: delegating to %s' % other_wscript)
                to_recurse.append(pkgname)
continue
pkgcfg = bld.env.orch_package_dict[pkgname]
featlist = util.string2list(pkgcfg.get('features'))
msg.debug('orch: features for %s: "%s"' % (pkgname, '", "'.join(featlist)))
for feat in featlist:
try:
feat_func = feature_funcs[feat]
except KeyError:
msg.error('No method for feature: "%s", package: "%s"'%(feat,pkgname))
raise
msg.debug('orch: feature: "%s" for package: "%s"' % (feat, pkgname))
pfi = feat_func(bld, pkgcfg)
pfi_list.append(pfi)
if to_recurse:
bld.recurse(to_recurse)
for pfi in pfi_list:
pfi.register_dependencies()
msg.debug ('orch: BUILD CALLED [done]')
|
vladsaveliev/bcbio-nextgen
|
bcbio/rnaseq/cufflinks.py
|
Python
|
mit
| 10,844
| 0.000738
|
"""Assess transcript abundance in RNA-seq experiments using Cufflinks.
http://cufflinks.cbcb.umd.edu/manual.html
"""
import os
import shutil
import tempfile
import pandas as pd
from bcbio import bam
from bcbio.utils import get_in, file_exists, safe_makedir
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.rnaseq import gtf, annotate_gtf
def run(align_file, ref_file, data):
align_file = bam.convert_cufflinks_mapq(align_file)
config = data["config"]
cmd = _get_general_options(align_file, config)
cmd.extend(_get_no_assembly_options(ref_file, data))
out_dir = _get_output_dir(align_file, data)
tracking_file = os.path.join(out_dir, "genes.fpkm_tracking")
fpkm_file = os.path.join(out_dir, data['rgnames']['sample']) + ".fpkm"
tracking_file_isoform = os.path.join(out_dir, "isoforms.fpkm_tracking")
fpkm_file_isoform = os.path.join(out_dir, data['rgnames']['sample']) + ".isoform.fpkm"
if not file_exists(fpkm_file):
with file_transaction(data, out_dir) as tmp_out_dir:
safe_makedir(tmp_out_dir)
cmd.extend(["--output-dir", tmp_out_dir])
cmd.extend([align_file])
cmd = list(map(str, cmd))
do.run(cmd, "Cufflinks on %s." % (al
|
ign_file))
fpkm_file = gene_tracking_to_fpkm(tracking_file, fpkm_file)
fpkm_file_isoform = gene_tracking_to_fpkm(tracking_file_isoform, fpkm_file_isoform)
return out_dir, fpkm_file, fpkm_file_isoform
def gene_tracking_to_fpkm(tracking_file, out_file):
"""
take a gene-level tracking file from Cufflinks and output a two column
table with the first column as IDs and the second column as FPKM for the
    sample. combines FPKM from the same genes into one FPKM value to fix
this bug: http://seqanswers.com/forums/showthread.php?t=5224&page=2
"""
if file_exists(out_file):
return out_file
df = pd.io.parsers.read_csv(tracking_file, sep="\t", header=0)
df = df[['tracking_id', 'FPKM']]
df = df.groupby(['tracking_id']).sum()
df.to_csv(out_file, sep="\t", header=False, index_label=False)
return out_file
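# Hedged illustration of the groupby collapse above on a toy frame (not run
# by the pipeline; the values are invented):
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({'tracking_id': ['g1', 'g1', 'g2'],
#     ...                    'FPKM': [1.0, 2.0, 5.0]})
#     >>> df.groupby(['tracking_id']).sum()['FPKM'].to_dict()
#     {'g1': 3.0, 'g2': 5.0}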
def _get_general_options(align_file, config):
options = []
cufflinks = config_utils.get_program("cufflinks", config)
options.extend([cufflinks])
options.extend(["--num-threads", config["algorithm"].get("num_cores", 1)])
options.extend(["--quiet"])
options.extend(["--no-update-check"])
options.extend(["--max-bundle-frags", 2000000])
options.extend(_get_stranded_flag(config))
return options
def _get_no_assembly_options(ref_file, data):
options = []
options.extend(["--frag-bias-correct", ref_file])
options.extend(["--multi-read-correct"])
options.extend(["--upper-quartile-norm"])
gtf_file = data["genome_resources"]["rnaseq"].get("transcripts", "")
if gtf_file:
options.extend(["--GTF", gtf_file])
mask_file = data["genome_resources"]["rnaseq"].get("transcripts_mask", "")
if mask_file:
options.extend(["--mask-file", mask_file])
return options
def _get_stranded_flag(config):
strand_flag = {"unstranded": "fr-unstranded",
"firststrand": "fr-firststrand",
"secondstrand": "fr-secondstrand"}
stranded = get_in(config, ("algorithm", "strandedness"), "unstranded").lower()
assert stranded in strand_flag, ("%s is not a valid strandedness value. "
"Valid values are 'firststrand', "
"'secondstrand' and 'unstranded" % (stranded))
flag = strand_flag[stranded]
return ["--library-type", flag]
def _get_output_dir(align_file, data, sample_dir=True):
config = data["config"]
name = data["rgnames"]["sample"] if sample_dir else ""
return os.path.join(get_in(data, ("dirs", "work")), "cufflinks", name)
def assemble(bam_file, ref_file, num_cores, out_dir, data):
out_dir = os.path.join(out_dir, data["rgnames"]["sample"])
safe_makedir(out_dir)
out_file = os.path.join(out_dir, "cufflinks-assembly.gtf")
cufflinks_out_file = os.path.join(out_dir, "transcripts.gtf")
library_type = " ".join(_get_stranded_flag(data["config"]))
if file_exists(out_file):
return out_file
bam_file = bam.convert_cufflinks_mapq(bam_file)
with file_transaction(data, out_dir) as tmp_out_dir:
cmd = ("cufflinks --output-dir {tmp_out_dir} --num-threads {num_cores} "
"--frag-bias-correct {ref_file} "
"--quiet "
"{library_type} --multi-read-correct --upper-quartile-norm {bam_file}")
cmd = cmd.format(**locals())
do.run(cmd, "Assembling transcripts with Cufflinks using %s." % bam_file)
shutil.move(cufflinks_out_file, out_file)
return out_file
def clean_assembly(gtf_file, clean=None, dirty=None):
"""
clean the likely garbage transcripts from the GTF file including:
1. any novel single-exon transcripts
2. any features with an unknown strand
"""
base, ext = os.path.splitext(gtf_file)
db = gtf.get_gtf_db(gtf_file, in_memory=True)
clean = clean if clean else base + ".clean" + ext
dirty = dirty if dirty else base + ".dirty" + ext
if file_exists(clean):
return clean, dirty
logger.info("Cleaning features with an unknown strand from the assembly.")
with open(clean, "w") as clean_handle, open(dirty, "w") as dirty_handle:
for gene in db.features_of_type('gene'):
for transcript in db.children(gene, level=1):
if is_likely_noise(db, transcript):
write_transcript(db, dirty_handle, transcript)
else:
write_transcript(db, clean_handle, transcript)
return clean, dirty
def write_transcript(db, handle, transcript):
for feature in db.children(transcript):
handle.write(str(feature) + "\n")
def is_likely_noise(db, transcript):
if is_novel_single_exon(db, transcript):
return True
if strand_unknown(db, transcript):
return True
def strand_unknown(db, transcript):
"""
for unstranded data with novel transcripts single exon genes
will have no strand information. single exon novel genes are also
a source of noise in the Cufflinks assembly so this removes them
"""
features = list(db.children(transcript))
strand = features[0].strand
if strand == ".":
return True
else:
return False
def is_novel_single_exon(db, transcript):
features = list(db.children(transcript))
exons = [x for x in features if x.featuretype == "exon"]
class_code = features[0].attributes.get("class_code", None)[0]
if len(exons) == 1 and class_code == "u":
return True
return False
def fix_cufflinks_attributes(ref_gtf, merged_gtf, data, out_file=None):
"""
replace the cufflinks gene_id and transcript_id with the
gene_id and transcript_id from ref_gtf, where available
"""
base, ext = os.path.splitext(merged_gtf)
fixed = out_file if out_file else base + ".clean.fixed" + ext
if file_exists(fixed):
return fixed
ref_db = gtf.get_gtf_db(ref_gtf)
merged_db = gtf.get_gtf_db(merged_gtf, in_memory=True)
ref_tid_to_gid = {}
for gene in ref_db.features_of_type('gene'):
for transcript in ref_db.children(gene, level=1):
ref_tid_to_gid[transcript.id] = gene.id
ctid_to_cgid = {}
ctid_to_oid = {}
for gene in merged_db.features_of_type('gene'):
for transcript in merged_db.children(gene, level=1):
ctid_to_cgid[transcript.id] = gene.id
feature = list(merged_db.children(transcript))[0]
oid = feature.attributes.get("oId", [None])[0]
if oid:
ctid_to_oid[transcript.id] = oid
cgid_to_gid = {}
for ctid, oid in ctid_to_oid.items():
cgid = ctid_to_cgid.get(ctid, None)
oid = ctid_to_oid.get(ctid, None)
gid = ref_tid_to_gid.get(oid, None) if oid else None
        if cgid and gid:
            cgid_to_gid[cgid] = gid
|
liam2/larray
|
larray/inout/xw_reporting.py
|
Python
|
gpl-3.0
| 35,378
| 0.003307
|
import warnings
from pathlib import Path
from typing import Union
try:
import xlwings as xw
except ImportError:
xw = None
from larray.util.misc import _positive_integer
from larray.core.group import _translate_sheet_name
from larray.core.array import asarray, zip_array_items
from larray.example import load_example_data, EXAMPLE_EXCEL_TEMPLATES_DIR
_default_items_size = {}
def _validate_template_filename(filename: Union[str, Path]) -> Path:
if isinstance(filename, str):
filename = Path(filename)
suffix = filename.suffix
if not suffix:
suffix = '.crtx'
if suffix != '.crtx':
raise ValueError(f"Extension for the excel template file must be '.crtx' instead of {suffix}")
return filename.with_suffix(suffix)
class AbstractReportItem:
def __init__(self, template_dir=None, template=None, graphs_per_row=1):
self.template_dir = template_dir
self.template = template
self.default_items_size = _default_items_size.copy()
self.graphs_per_row = graphs_per_row
@property
def template_dir(self):
r"""
Set the path to the directory containing the Excel template files (with '.crtx' extension).
This method is mainly useful if your template files are located in several directories,
        otherwise pass the template directory directly to the ExcelReport constructor.
Parameters
----------
template_dir : str or Path
Path to the directory containing the Excel template files.
See Also
--------
set_graph_template
Examples
--------
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
>>> # ... add some graphs using template files from 'C:\excel_templates_dir'
>>> report.template_dir = r'C:\other_templates_dir' # doctest: +SKIP
>>> # ... add some graphs using template files from 'C:\other_templates_dir'
"""
return self._template_dir
@template_dir.setter
def template_dir(self, template_dir):
if template_dir is not None:
if isinstance(template_dir, str):
template_dir = Path(template_dir)
if not isinstance(template_dir, Path):
raise TypeError(f"Expected a string or a pathlib.Path object. "
f"Got an object of type {type(template_dir).__name__} instead.")
if not template_dir.is_dir():
raise ValueError(f"The directory {template_dir} could not be found.")
self._template_dir = template_dir
@property
def template(self):
r"""
Set a default Excel template file.
Parameters
----------
template : str or Path
Name of the template to be used as default template.
The extension '.crtx' will be added if not given.
The full path to the template file must be given if no template directory has been set.
Examples
--------
>>> demo = load_example_data('demography_eurostat')
Passing the name of the template (only if a template directory has been set)
>>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
>>> report.template = 'Line'
>>> sheet_population = report.new_sheet('Population')
>>> sheet_population.add_graph(demo.population['Belgium'],'Belgium')
Passing the full path of the template file
>>> # if no default template directory has been set
>>> # or if the new template is located in another directory,
>>> # you must provide the full path
>>> sheet_population.template = r'C:\other_templates_dir\Line_Marker.crtx' # doctest: +SKIP
>>> sheet_population.add_graph(demo.population['Germany'],'Germany') # doctest: +SKIP
"""
return self._template
@template.setter
def template(self, template):
if template is not None:
if self.template_dir is None:
raise RuntimeError("Please set 'template_dir' first")
filename = _validate_template_filename(template)
template = self.template_dir / filename
self._template = template
def set_item_default_size(self, kind, width=None, height=None):
r"""
Override the default 'width' and 'height' values for the given kind of item.
A new value must be provided at least for 'width' or 'height'.
Parameters
----------
kind : str
kind of item for which default values of 'width' and/or 'height' are modified.
Currently available kinds are 'title' and 'graph'.
width : int, optional
new default width value.
height : int, optional
new default height value.
Examples
--------
>>> report = ExcelReport()
>>> report.set_item_default_size('graph', width=450, height=250)
"""
if width is None and height is None:
raise ValueError("No value provided for both 'width' and 'heigth'. "
"Please provide one for at least 'width' or 'heigth'")
if kind not in self.default_items_size:
item_types = sorted(self.default_items_size.keys())
raise ValueError(f"Item type {kind} is not registered. Please choose in list {item_types}")
if width is None:
width = self.default_items_size[kind].width
if height is None:
height = self.default_items_size[kind].height
self.default_items_size[kind] = ItemSize(width, height)
@property
def graphs_per_row(self):
r"""
Default number of graphs per row.
Parameters
----------
graphs_per_row: int
See Also
--------
ReportSheet.newline
"""
return self._graphs_per_row
    @graphs_per_row.setter
def graphs_per_row(self, graphs_per_row):
_positive_integer(graphs_per_row)
self._graphs_per_row = graphs_per_row
class AbstractReportSheet(AbstractReportItem):
r"""
Represents a sheet dedicated to contains only graphical items (title banners, graphs).
See :py:obj:`ExcelReport` for use cases.
Parameters
    ----------
    template_dir : str or Path, optional
Path to the directory containing the Excel template files (with a '.crtx' extension).
Defaults to None.
template : str or Path, optional
Name of the template to be used as default template.
The extension '.crtx' will be added if not given.
The full path to the template file must be given if no template directory has been set.
Defaults to None.
graphs_per_row : int, optional
Default number of graphs per row. Defaults to 1.
See Also
--------
ExcelReport
"""
def add_title(self, title, width=None, height=None, fontsize=11):
r"""
Add a title item to the current sheet.
Note that the current method only add a new item to the list of items to be generated.
The report Excel file is generated only when the :py:obj:`~ExcelReport.to_excel` is called.
Parameters
----------
title : str
Text to write in the title item.
width : int, optional
width of the title item. The current default value is used if None
(see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
height : int, optional
height of the title item. The current default value is used if None
(see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
fontsize : int, optional
fontsize of the displayed text. Defaults to 11.
Examples
--------
>>> report = ExcelReport()
>>> first_sheet = report.new_sheet('First_sheet')
>>> first_sheet.add_title('Title banner with default width, height and fontsize')
>>> first_sheet.add_title('Larger title banner', width=1200, height=100)
>>> first_sheet.add_title('Bigger fontsize', fontsize=13)
>>> # do not forget to call 'to_excel' to cre
|
mofei2816/mervinz
|
blog/apps.py
|
Python
|
mit
| 1,239
| 0
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2016 Mervin <mofei2816@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from django.apps import AppConfig
class ProtalConfig(AppConfig):
name = 'blog'
verbose_name = 'Blog'
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGL/raw/WGL/EXT/pbuffer.py
|
Python
|
lgpl-3.0
| 1,629
| 0.031921
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.WGL import _types as _cs
# End users want this...
from OpenGL.raw.WGL._types import *
from OpenGL.raw.WGL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'WGL_EXT_pbuffer'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.WGL,'WGL_EXT_pbuffer',error_checker=_errors._error_checker)
WGL_DRAW_TO_PBUFFER_EXT=_C('WGL_DRAW_TO_PBUFFER_EXT',0x202D)
WGL_MAX_PBUFFER_HEIGHT_EXT=_C('WGL_MAX_PBUFFER_HEIGHT_EXT',0x2030)
WGL_MAX_PBUFFER_PIXELS_EXT=_C('WGL_MAX_PBUFFER_PIXELS_EXT',0x202E)
WGL_MAX_PBUFFER_WIDTH_EXT=_C('WGL_MAX_PBUFFER_WIDTH_EXT',0x202F)
WGL_OPTIMAL_PBUFFER_HEIGHT_EXT=_C('WGL_OPTIMAL_PBUFFER_HEIGHT_EXT',0x2032)
WGL_OPTIMAL_PBUFFER_WIDTH_EXT=_C('WGL_OPTIMAL_PBUFFER_WIDTH_EXT',0x2031)
WGL_PBUFFER_HEIGHT_EXT=_C('WGL_PBUFFER_HEIGHT_EXT',0x2035)
WGL_PBUFFER_LARGEST_EXT=_C('WGL_PBUFFER_LARGEST_EXT',0x2033)
WGL_PBUFFER_WIDTH_EXT=_C('WGL_PBUFFER_WIDTH_EXT',0x2034)
@_f
@_p.types(_cs.HPBUFFEREXT,_cs.HDC,_cs.c_int,_cs.c_int,_cs.c_int,ctypes.POINTER(_cs.c_int))
def wglCreatePbufferEXT(hDC,iPixelFormat,iWidth,iHeight,piAttribList):pass
@_f
@_p.types(_cs.BOOL,_cs.HPBUFFEREXT)
def wglDestroyPbufferEXT(hPbuffer):pass
@_f
@_p.types(_cs.HDC,_cs.HPBUFFEREXT)
def wglGetPbufferDCEXT(hPbuffer):pass
@_f
@_p.types(_cs.BOOL,_cs.HPBUFFEREXT,_cs.c_int,ctypes.POINTER(_cs.c_int))
def wglQueryPbufferEXT(hPbuffer,iAttribute,piValue):pass
@_f
@_p.types(_cs.c_int,_cs.HPBUFFEREXT,_cs.HDC)
def wglReleasePbufferDCEXT(hPbuffer,hDC):pass
|
eric-stanley/freeciv-android
|
lib/freeciv/dropbox_civsync.py
|
Python
|
gpl-2.0
| 1,258
| 0.007154
|
import sync
import ui
import features
import functools
import save as _save
from freeciv.dropbox import get_download_path
def _impl_save(name, path):
def _do():
data = open(path, 'rb').read()
sync.request_with_sid('/sync/upload_save',
name=name,
sharing=features.get('civsync.allow_sharing'),
post_data=data)
ui.async(_do)
def load_from_dropbox():
ui.message('Listing saves from Dropbox...')
ui.async(
lambda: sync.json_request_with_sid('/sync/list'),
then=load_dialog)
def load_dialog(entries):
menu = ui.LinearLayoutWidget()
menu.add(ui.Label('Save your games to folder /Applications/Freeciv in your Dropbox.'))
for entry in entries:
menu.add(ui.Button(entry, functools.partial(load_dropbox_save, entry)))
    ui.set(ui.ScrollWrapper(menu))
def load_dropbox_save(name):
def load_save(data):
with open(get_download_path(), 'wb') as f:
f.write(data)
_save.load_game(get_download_path())
ui.message('Fetching save...')
return ui.async(lambda: sync.request_with_sid('/sync/download', name=name),
then=load_save)
def login():
pass
|
mdavid8/WebSpark
|
spark_webservice_demo.py
|
Python
|
mit
| 1,997
| 0.031047
|
# Copyright 2015 David Wang. All rights reserved.
# Use of this source code is governed by MIT license.
# Please see LICENSE file
# WebSpark
# Spark web service demo
# version 0.2
# use REPL or define sc SparkContext
import urllib2, urllib
import math
import time
import traceback
# Spark Web Application demo with parallel processing
# see demoservice function
ServerAddr="http://<enter WebSpark IP address here>:8001"
RegisterURL=ServerAddr + "/addapi?"
RespondURL=ServerAddr + "/respond?"
errwaitseconds = 3
element = '<li class="list
|
-group-item">first prime above %d is %d</li>'
with open('template.html') as f:
template = f.read()
def slow_isprime(num):
if num<2:
return False
for i in range(2, int(math.sqrt(num))+1):
if num%i==0:
return False
return True
def firstprimeabove(num):
i=num+1
while True:
if slow_isprime(i):
return i
i+=1
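# worked example: firstprimeabove(1000) == 1009
# (1001, 1003, 1005 and 1007 are all composite; trial division is slow but
# fine for this demo's input sizes)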
servicename = 'demo'
# Spark Web Application demo
def demo(url):
rawdata = range(1000, 20000, 1100)
data = sc.parallelize(rawdata)
above=data.map(lambda x: (x, firstprimeabove(x))).collect()
primelist=[element%x for x in above]
response = template % ' '.join(primelist)
return response
def parserequest(rawrequest):
lines = rawrequest.split('\n')
if len(lines)<4:
print 'incorrect WebSpark request'
else:
name = lines[0]
url = lines[1]
remoteaddr = lines[2]
header = lines[3:]
return name, url, remoteaddr, header
st =''
# publish web service with WebSpark
while True:
try:
url = RegisterURL + urllib.urlencode({'name': servicename})
conn = urllib2.urlopen(url)
data = conn.read()
conn.close()
name, clienturl, remoteaddr, header = parserequest(data)
print name, clienturl, remoteaddr, header
response = demo(clienturl)
url = RespondURL + urllib.urlencode({'name': name})
conn = urllib2.urlopen(url, response)
conn.close()
except Exception as ex:
print 'error connecting to WebSpark at', ServerAddr
traceback.print_exc()
time.sleep(errwaitseconds)
continue
|
pinax/pinax-teams
|
pinax/teams/tests/urls.py
|
Python
|
mit
| 178
| 0
|
from django.conf.urls import include, url
urlpatterns = [
url(r"^account/", include("account.urls")),
url(r"^", include("pinax.teams.urls", namespace="pinax_teams")),
]
|
derekperry/oaxmlapi
|
oaxmlapi/__init__.py
|
Python
|
mit
| 123
| 0
|
# Set modules to be exported with "from oaxmlapi import *"
__all__ = ['commands', 'connections', 'datatypes', 'utilities']
|
herove/dotfiles
|
sublime/Packages/Package Control/package_control/http/debuggable_http_response.py
|
Python
|
mit
| 2,329
| 0.000429
|
try:
# Python 3
from http.client import HTTPResponse, IncompleteRead
str_cls = str
except (ImportError):
# Python 2
from httplib import HTTPResponse, IncompleteRead
str_cls = unicode
from ..console_write import console_write
class DebuggableHTTPResponse(HTTPResponse):
"""
A custom HTTPResponse that formats debugging info for Sublime Text
"""
_debug_protocol = 'HTTP'
def __init__(self, sock, debuglevel=0, method=None, **kwargs):
# We have to use a positive debuglevel to get it passed to here,
# however we don't want to use it because by default debugging prints
# to the stdout and we can't capture it, so we use a special -1 value
if debuglevel == 5:
debuglevel = -1
HTTPResponse.__init__(self, sock, debuglevel=debuglevel, method=method)
def begin(self):
return_value = HTTPResponse.begin(self)
if self.debuglevel == -1:
# Python 2
if hasattr(self.msg, 'headers'):
headers = [line.rstrip() for line in self.msg.headers]
# Python 3
else:
headers = []
for header in self.msg:
headers.append("%s: %s" % (header, self.msg[header]))
versions = {
|
9: u'HTTP/0.9',
10: u'HTTP/1.0',
11: u'HTTP/1.1'
}
status_line = u'%s %s %s' % (versions[self.version], str_cls(self.status), self.reason)
headers.insert(0, status_line)
indented_headers = u'\n '.join(headers)
            console_write(
                u'''
Urllib %s Debug Read
%s
''',
(self._debug_protocol, indented_headers)
)
return return_value
def is_keep_alive(self):
# Python 2
if hasattr(self.msg, 'headers'):
connection = self.msg.getheader('connection')
# Python 3
else:
connection = self.msg['connection']
if connection and connection.lower() == 'keep-alive':
return True
return False
def read(self, *args):
try:
return HTTPResponse.read(self, *args)
except (IncompleteRead) as e:
return e.partial
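            # descriptive note: IncompleteRead carries whatever bytes made it
            # over the wire in e.partial, so callers still get a usable,
            # possibly truncated body instead of an exception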
|
kennethlove/django_bookmarks
|
dj_bookmarks/bookmarks/urls/collections.py
|
Python
|
bsd-3-clause
| 278
| 0.003597
|
from django.conf.urls import url
from ..views import collections as views
urlpatterns = [
url('^create/$', views.Create.as_view(), name='create'),
url('^c:(?P<slug>[-\w]+)/$', views.Detail.as_view(), name='detail'),
url('^$', views.List.as_view(), name='list'),
]
|
mryanlam/f5-ansible
|
library/iworkflow_service.py
|
Python
|
gpl-3.0
| 14,907
| 0.001006
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: iworkflow_service
short_description: Manages L4/L7 Services on iWorkflow.
description:
- Manages L4/L7 Service on iWorkflow. Services can only be created and
otherwise managed by tenants on iWorkflow. Since all of the F5 modules
assume the use of the administrator account, the user of this module
will need to include the C(tenant) option if they want to use this
module with the admin account.
version_added: "2.4"
options:
tenant:
    description:
      - The tenant whose service is going to be managed. This is a required
        option when using the system's C(admin) account as the admin is not
a tenant, and therefore cannot manipulate any of the L4/L7 services
that exist. If the C(user) option is not the C(admin) account, then
this tenant option is assumed to be the user who is connecting to
the BIG-IP. This assumption can always be changed by setting this
option to whatever tenant you wish.
required: False
default: None
name:
description:
- Name of the L4/L7 service.
required: True
parameters:
description:
- A dictionary containing the values of input parameters that the
service administrator has made available for tenant editing.
required: False
default: None
connector:
description:
- The cloud connector associated with this L4/L7 service. This option
is required when C(state) is C(present).
required: False
default: None
service_template:
description:
- The Service Template that you want to base this L4/L7 Service off of.
This option is required when C(state) is C(present).
required: False
default: None
notes:
- Requires the f5-sdk Python package on the remote host. This is as easy as
pip install f5-sdk.
- L4/L7 Services cannot be updated once they have been created. Instead, you
must first delete the service and then re-create it.
requirements:
- f5-sdk >= 2.3.0
- iWorkflow >= 2.1.0
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
'''
RETURN = '''
'''
from ansible.module_utils.f5_utils import *
class Parameters(AnsibleF5Parameters):
api_map = {
'properties': 'connector'
}
returnables = ['vars']
api_attributes = [
'name', 'vars', 'tables', 'tenantTemplateReference', 'tenantReference', 'properties'
]
updatables = ['tables', 'vars']
def __init__(self, params=None):
self._values = defaultdict(lambda: None)
if params:
self.update(params=params)
def update(self, params=None):
if params:
for k,v in iteritems(params):
if self.api_map is not None and k in self.api_map:
map_key = self.api_map[k]
else:
map_key = k
# Handle weird API parameters like `dns.proxy.__iter__` by
# using a map provided by the module developer
class_attr = getattr(type(self), map_key, None)
if isinstance(class_attr, property):
# There is a mapped value for the api_map key
if class_attr.fset is None:
# If the mapped value does not have an associated setter
self._values[map_key] = v
else:
# The mapped value has a setter
setattr(self, map_key, v)
else:
# If the mapped value is not a @property
self._values[map_key] = v
def _get_connector_collection(self):
return self.client.api.cm.cloud.connectors.locals.get_collection()
def _get_connector_selflink(self, connector, collection):
for resource in collection:
if str(resource.displayName) != "BIG-IP":
continue
if str(resource.name) != connector:
continue
return str(resource.selfLink)
return None
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
except Exception:
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
def _username_has_admin_role(self, username):
collection = self._get_users_with_admin_role()
for resource in collection.userReferences:
if resource.name == username:
return True
return False
def _get_users_with_admin_role(self):
return self.client.shared.authz.roles_s.role.load(
name='Administrator',
requests_params=dict(
params='$expand=userReferences'
)
)
@property
def tenant(self):
if self._values['tenant'] is None:
if self._username_has_admin_role(self.want.user):
raise F5ModuleError(
"A 'tenant' must be specified when using an "
"Administrator account"
)
else:
# This allows tenant users to assume their username
# is the tenant that is interacting with iWorkflow.
return str(self.want.user)
else:
return str(self._values['tenant'])
@property
def tables(self):
result = []
if not self._values['tables']:
return None
tables = self._values['tables']
for table in tables:
tmp = dict()
name = table.get('name', None)
if name is None:
raise F5ModuleError(
"One of the provided tables does not have a name"
)
tmp['name'] = str(name)
columns = table.get('columns', None)
if columns:
tmp['columns'] = []
for column in columns:
tmp['columns'].append(
dict((str(k),str(v)) for k,v in iteritems(column))
)
# You cannot have rows without columns
rows = table.get('rows', None)
if rows:
tmp['rows'] = list(list())
for row in rows:
tmp['rows'][0].append([str(x) for x in row])
description = table.get('description', None)
if description:
tmp['description'] = str(description)
section = table.get('section', None)
if section:
tmp['section'] = str(section)
result.append(tmp)
result = sorted(result, key=lambda k: k['name'])
return result
@tables.setter
def tables(self, value):
self._values['tables'] = value
@property
def vars(self):
result
|
jwir3/gitbranchhealth
|
tests/testutil.py
|
Python
|
mpl-2.0
| 2,053
| 0.006332
|
import unittest
import tempfile
import os
from os.path import join
import zipfile
from git import *
from shutil import rmtree
from gitbranchhealth.branchhealth import BranchHealthConfig
class GitRepoTest(unittest.TestCase):
def setUp(self):
self.__mOriginTempDir = tempfile.mkdtemp(prefix='gitBranchHealthTest')
self.assertTrue(os.path.exists(self.__mOriginTempDir))
# Create our origin first
testRepoZipPath = join(self.__findTestDir(), 'testrepo.zip')
zipFh = open(testRepoZipPath, 'rb')
testRepoZip = zipfile.ZipFile(zipFh)
for name in testRepoZip.namelist():
testRepoZip.extract(name, self.__mOriginTempDir)
zipFh.close()
self.__mOriginGitRepoPath = os.path.join(self.__mOriginTempDir, 'testrepo')
originRepo = Repo(self.__mOriginGitRepoPath)
self.__mTempDir = tempfile.mkdtemp(prefix='gitBranchHealthTest')
os.mkdir(os.path.join(self.__mTempDir, 'testrepo'))
self.assertTrue(os.path.exists(self.__mTempDir))
# Now create the local repo
    self.__mGitRepoPath = os.path.join(self.__mTempDir, 'testrepo')
originRepo.clone(self.__mGitRepoPath)
self.assertTrue(os.path.exists(self.__mGitRepoPath))
self.__mConfig = BranchHealthConfig(self.__mGitRepoPath)
self.__trackAllRemoteBranches()
  def tearDown(self):
pass
# rmtree(self.__mTempDir)
# rmtree(self.__mOriginTempDir)
def getConfig(self):
return self.__mConfig
def getTempDir(self):
return self.__mTempDir
## Private API ###
def __trackAllRemoteBranches(self):
repo = Repo(self.__mGitRepoPath)
for remote in repo.remotes:
for branch in remote.refs:
localBranchName = branch.name.split('/')[-1]
if localBranchName != 'master' and localBranchName != 'HEAD':
repo.git.checkout(branch.name, b=localBranchName)
repo.heads.master.checkout()
def __findTestDir(self):
# Find the file called 'testrepo.zip', starting at the current dir
for (root, dirs, files) in os.walk('.'):
if 'testrepo.zip' in files:
return root
|
dvdmgl/django-pg-fts
|
pg_fts/__init__.py
|
Python
|
bsd-2-clause
| 63
| 0
|
from __future__ import unicode_literals
__VERSION__ = '0.1.1'
|
hufman/flask_rdf
|
examples/browser_default.py
|
Python
|
bsd-2-clause
| 751
| 0.002663
|
#!/usr/bin/env python
from rdflib import Graph, BNode, Literal, URIRef
from rdflib.namespace import FOAF
from flask import Flask
import flask_rdf
import random
app = Flask(__name__)
# set up a custom formatter to return turtle in text/plain to browsers
custom_formatter = flask_rdf.FormatSelector()
custom_formatter.wildcard_mimetype = 'text/plain'
custom_formatter.add_format('text/plain', 'turtle')
custom_decorator = flask_rdf.flask.Decorator(custom_formatter)
@app.route('/')
@app.route('/<path:path>')
@custom_decorator
def random_age(path=''):
graph = Graph('IOMemory', BNode())
    graph.add((URIRef(path), FOAF.age, Literal(random.randint(20, 50))))
return graph
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
|
jucimarjr/IPC_2017-1
|
lista02/lista02_exercicio01_questao06.py
|
Python
|
apache-2.0
| 781
| 0.019481
|
#-----------------------------------------------------------------------------------------------------------------------
# Introduction to Computer Programming (Introdução a Programação de Computadores) - IPC
# Universidade do Estado do Amazonas - UEA
# Prof. Jucimar Jr.
# Alexandre Marques Uchôa 1715310028
# Jandinne Duarte de Oliveira 1015070265
# Uriel Brito Barros 1515120558
# Roberta de Oliveira da cruz 0825070169
# Evandro Padilha Barroso Filho 1715310009
#
##
# Write a program that asks for the radius of a circle, then calculates and displays its area.
#-----------------------------------------------------------------------------------------------------------------------
r = float(input("Enter a radius"))
area = (3.14 * r * r)
print('The area is', area)
|
bitmazk/cmsplugin-django-outlets
|
cmsplugin_outlets/cms_app.py
|
Python
|
mit
| 360
| 0
|
"""CMS apphook for the django-
|
outlets app."""
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from . import menu
class OutletsApphook(CMSApp):
name = _("Outlets Apphook")
urls = ["outlets.urls"]
menus = [menu.OutletsMenu]
apphook_pool.register(OutletsApphook)
|
KhronosGroup/COLLADA-CTS
|
StandardDataSets/collada/library_cameras/camera/optics/orthographic/optics_orthographic_zfar/optics_orthographic_zfar.py
|
Python
|
mit
| 4,007
| 0.009234
|
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = ['library_cameras', 'camera', 'optics', 'technique_common', 'orthographic', 'zfar']
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
        self.status_superior = False
        self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
# Baseline
def JudgeBaseline(self, context):
# No step should crash
self.__assistant.CheckCrashes(context)
        # Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
# Superior
def JudgeSuperior(self, context):
# if baseline fails, no point in further checking
if (self.status_baseline == False):
self.status_superior = self.status_baseline
return self.status_superior
# Compare the rendered images, then compare against reference
# Then check for preservation of element data
if ( self.__assistant.CompareRenderedImages(context) ):
if ( self.__assistant.CompareImagesAgainst(context, "_reference_optics_orthographic_zfar_znear", None, None, 5, True, False) ):
self.__assistant.ElementDataPreserved(context, self.tagList, "float")
self.status_superior = self.__assistant.DeferJudgement(context)
return self.status_superior
# To pass advanced you need to pass intermediate, this object could also include additional
# tests that were specific to the advanced badge
def JudgeExemplary(self, context):
self.status_exemplary = self.status_superior
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
|
UKPLab/sentence-transformers
|
examples/training/sts/training_stsbenchmark_continue_training.py
|
Python
|
apache-2.0
| 3,514
| 0.005407
|
"""
This example loads the pre-trained SentenceTransformer model 'nli-distilroberta-base-v2' from the server.
It then fine-tunes this model for some epochs on the STS benchmark dataset.
Note: In this example, you must specify a SentenceTransformer model.
If you want to fine-tune a huggingface/transformers model like bert-base-uncased, see training_nli.py and training_stsbenchmark.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
# Check if the dataset exists. If not, download and extract it
sts_dataset_path = 'datasets/stsbenchmark.tsv.gz'
if not os.path.exists(sts_dataset_path):
util.http_get('https://sbert.net/datasets/stsbenchmark.tsv.gz', sts_dataset_path)
# Read the dataset
model_name = 'nli-distilroberta-base-v2'
train_batch_size = 16
num_epochs = 4
model_save_path = 'output/training_stsbenchmark_continue_training-'+model_name+'-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Load a pre-trained sentence transformer model
model = SentenceTransformer(model_name)
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row['score']) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row['sentence1'], row['sentence2']], label=score)
if row['split'] == 'dev':
dev_samples.append(inp_example)
elif row['split'] == 'test':
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
# Development set: Measure correlation between cosine score and gold labels
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')
# Configure the training. We skip evaluation in this example
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) #10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')
test_evaluator(model, output_path=model_save_path)
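# A quick qualitative check of the fine-tuned model (a minimal sketch; the
# sentence pair below is illustrative, not part of the benchmark).
emb = model.encode(["A man is playing guitar.", "Someone plays an instrument."],
                   convert_to_tensor=True)
# Cosine similarity in [-1, 1]; higher means the model sees the pair as closer.
print(util.pytorch_cos_sim(emb[0], emb[1]).item())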
|
alfred82santa/dirty-validators
|
tests/dirty_validators/tests_basic.py
|
Python
|
mit
| 26,469
| 0.002909
|
from unittest import TestCase
from dirty_validators.basic import (BaseValidator, EqualTo, NotEqualTo, StringNotContaining, Length, NumberRange,
Regexp, Email, IPAddress, MacAddress, URL, UUID, AnyOf, NoneOf,
IsEmpty, NotEmpty, NotEmptyString, IsNone, NotNone)
import re
class TestBaseValidator(TestCase):
def setUp(self):
self.validator = BaseValidator()
def tearDown(self):
pass
def test_validate_any(self):
self.assertTrue(self.validator.is_valid(None))
self.assertDictEqual(self.validator.messages, {})
self.assertTrue(self.validator.is_valid(3))
self.assertDictEqual(self.validator.messages, {})
self.assertTrue(self.validator.is_valid('aaa'))
self.assertDictEqual(self.validator.messages, {})
self.assertTrue(self.validator.is_valid({}))
self.assertDictEqual(self.validator.messages, {})
def test_error_not_hidden_behaviour(self):
error_key = 'Test key'
error_message = "'$value' is the value error to test hidden feature"
self.validator.error_messages = {error_key: error_message}
self.validator.error(error_key, 'Not hidden')
self.assertEqual(self.validator.messages,
{error_key: "'Not hidden' is the value error to test hidden feature"})
def test_error_hidden_behaviour(self):
hidden_validator = BaseValidator(hidden=True)
error_key = 'Test key'
error_message = "'$value' is the value error to test hidden feature"
hidden_validator.error_messages = {error_key: error_message}
hidden_validator.error(error_key, 'Will it be hidden?')
self.assertEqual(hidden_validator.messages,
{error_key: "'**Hidden**' is the value error to test hidden feature"})
class TestEqualTo(TestCase):
def setUp(self):
self.validator = EqualTo(comp_value="aaa")
def tearDown(self):
pass
def test_validate_str_success(self):
self.assertTrue(self.validator.is_valid("aaa"))
self.assertDictEqual(self.validator.messages, {})
def test_validate_str_fail(self):
self.assertFalse(self.validator.is_valid("aqaa"))
self.assertDictEqual(self.validator.messages, {EqualTo.NOT_EQUAL: "'aqaa' is not equal to 'aaa'"})
def test_validate_int_success(self):
self.validator = EqualTo(comp_value=3)
self.assertTrue(self.validator.is_valid(3))
self.assertDictEqual(self.validator.messages, {})
def test_validate_int_fail(self):
self.validator = EqualTo(comp_value=3)
self.assertFalse(self.validator.is_valid(4))
self.assertDictEqual(self.validator.messages, {EqualTo.NOT_EQUAL: "'4' is not equal to '3'"})
def test_validate_int_fail_custom_error_message(self):
self.validator = EqualTo(comp_value=3, error_messages={EqualTo.NOT_EQUAL: "$value $value aaa $comp_value"})
self.assertFalse(self.validator.is_valid(4))
self.assertDictEqual(self.validator.messages, {EqualTo.NOT_EQUAL: "4 4 aaa 3"})
def test_validate_int_fail_custom_error_code(self):
self.validator = EqualTo(comp_value=3, error_code_map={EqualTo.NOT_EQUAL: "newError"})
self.assertFalse(self.validator.is_valid(4))
self.assertDictEqual(self.validator.messages, {"newError": "'4' is not equal to '3'"})
def test_validate_int_fail_custom_error_code_and_error_message(self):
self.validator = EqualTo(comp_value=3,
error_code_map={EqualTo.NOT_EQUAL: "newError"},
error_messages={EqualTo.NOT_EQUAL: "$value $value aaa $comp_value"})
self.assertFalse(self.validator.is_valid(4))
self.assertDictEqual(self.validator.messages, {"newError": "4 4 aaa 3"})
def test_validate_int_fail_custom_error_code_error_message_and_custom_value(self):
self.validator = EqualTo(comp_value=3,
error_code_map={EqualTo.NOT_EQUAL: "newError"},
error_messages={EqualTo.NOT_EQUAL: "$value $value aaa $comp_value $value1 $value2"},
message_values={"value1": "aaaaaa1", "value2": "eeeeee1"})
self.assertFalse(self.validator.is_valid(4))
self.assertDictEqual(self.validator.messages, {"newError": "4 4 aaa 3 aaaaaa1 eeeeee1"})
class TestNotEqualTo(TestCase):
def setUp(self):
self.validator = NotEqualTo(comp_value="aaa")
def tearDown(self):
pass
def test_validate_str_success(self):
self.assertTrue(self.validator.is_valid("aqaa"))
self.assertDictEqual(self.validator.messages, {})
def test_validate_str_fail(self):
self.assertFalse(self.validator.is_valid("aaa"))
self.assertDictEqual(self.validator.messages, {NotEqualTo.IS_EQUAL: "'aaa' is equal to 'aaa'"})
def test_validate_int_success(self):
self.validator = NotEqualTo(comp_value=3)
self.assertTrue(self.validator.is_valid(4))
self.assertDictEqual(self.validator.messages, {})
def test_validate_int_fail(self):
self.validator = NotEqualTo(comp_value=3)
self.assertFalse(self.validator.is_valid(3))
self.assertDictEqual(self.validator.messages, {NotEqualTo.IS_EQUAL: "'3' is equal to '3'"})
class TestStringNotContaining(TestCase):
def setUp(self):
self.validator = StringNotContaining(token='Test_TOKEN')
def test_validate_string_contains(self):
self.assertFalse(self.validator.is_valid('This string contains Test_TOKEN for sure'))
self.assertDictEqual(self.validator.messages,
{StringNotContaining.NOT_CONTAINS:
"'This string contains Test_TOKEN for sure' contains 'Test_TOKEN'"})
def test_validate_string_not_contains(self):
self.assertTrue(self.validator.is_valid('This string does not contain TESt_TOKEN for sensitive cases'))
def test_validate_string_contains_not_sensitive(self):
self.validator.case_sensitive = False
self.assertFalse(self.validator.is_valid('This string contains TESt_TOKEN for sensitive cases'))
class TestLength(TestCase):
def setUp(self):
self.validator = Length(min=3, max=6)
def tearDown(self):
pass
def test_validate_str_success(self):
self.assertTrue(self.validator.is_valid("aqaa"))
self.assertDictEqual(self.validator.messages, {})
def test_validate_str_fail_short(self):
self.assertFalse(self.validator.is_valid("aa"))
self.assertDictEqual(self.validator.messages, {Length.TOO_SHORT: "'aa' is less than 3 unit length"})
def test_validate_str_fail_long(self):
self.assertFalse(self.validator.is_valid("aabbnnmm"))
self.assertDictEqual(self.validator.messages, {Length.TOO_LONG: "'aabbnnmm' is more than 6 unit length"})
def test_validate_int_fail(self):
self.assertFalse(self.validator.is_valid(5))
self.assertDictEqual(self.validator.messages, {Length.INVALID_TYPE: "'5' has no length"})
    def test_validate_list_success(self):
self.assertTrue(self.validator.is_valid(["1a", "32d", "tr", "wq"]))
self.assertDictEqual(self.validator.messages, {})
def test_validate_list_fail_short(self):
self.assertFalse(self.validator.is_valid(["1a"]))
self.assertDictEqual(self.validator.messages, {Length.TOO_SHORT: "'['1a']' is less than 3 unit length"})
def test_validate_list_fail_long(self):
        self.assertFalse(self.validator.is_valid(["1a", "32d", "tr", "wq", "qwqw", "dd", "as", "er"]))
self.assertDictEqual(self.validator.messages,
{Length.TOO_LONG:
"'['1a', '32d', 'tr', 'wq', 'qwqw', 'dd', 'as', 'er']' is more than 6 unit length"})
class TestNumberRange(TestCase):
def setUp(self):
self.validator = NumberRange(min=3, max=4)
def tearDown(self):
pass
def test_validate_int_success(self):
self.assertTrue(self.validator.is_valid(4))
        self.assertDictEqual(self.validator.messages, {})
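# The pattern these tests exercise, reduced to a minimal sketch (values
# illustrative): build a validator, call is_valid, then inspect messages.
demo = Length(min=3, max=6)
assert demo.is_valid("abcd")
assert not demo.is_valid("ab")
print(demo.messages)  # e.g. {Length.TOO_SHORT: "'ab' is less than 3 unit length"}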
|
beschulz/kyototycoon
|
bench/benchmark.py
|
Python
|
gpl-2.0
| 3,361
| 0.046712
|
# -*- coding: utf-8 -*-
#from pykt import *
from pykt import KyotoTycoon as kt1
from kyototycoon import DB as kt2
from pykt_emu import KyotoTycoon as kt3
import timeit
from cPickle import dumps, loads
key = "A" * 12
val = "B" * 1024
def bench_set(Impl):
db = Impl()
db.open()
ret = db.set(key, val)
assert ret == True
db.close()
pykt_set = lambda : bench_set(kt1)
kyoto_set = lambda : bench_set(kt2)
def bench_get(Impl):
db = Impl()
db.open()
ret = db.get(key)
assert ret == val
db.close()
pykt_get = lambda:bench_get(kt1)
kyoto_get = lambda:bench_get(kt2)
def bench_gets(Impl):
db = Impl()
db.open()
for i in xrange(10):
ret = db.get(key)
assert ret == val
db.close()
pykt_gets = lambda:bench_gets(kt1)
kyoto_gets = lambda:bench_gets(kt2)
def bench_increment(Impl):
db = Impl()
db.open()
ret = db.increment("N", 1)
db.close()
pykt_increment = lambda : bench_increment(kt1)
kyoto_increment = lambda : bench_increment(kt2)
def bench_replace(Impl):
db = Impl()
db.open()
ret = db.replace(key, val)
assert ret == True
db.close()
pykt_replace = lambda : bench_replace(kt1)
kyoto_replace = lambda : bench_replace(kt2)
def bench_append(Impl):
db = Impl()
db.open()
ret = db.append(key, val)
assert ret == True
db.close()
pykt_append = lambda : bench_append(kt1)
kyoto_append = lambda : bench_append(kt3)
implementations = (
('pykt' , kt1),
('kyototycoon' , kt2),
('pykt emu' , kt3),
)
ops = (
('set' , bench_set),
('get' , bench_get),
('gets' , bench_gets),
('increment', bench_increment),
('replace' , bench_replace),
#('append' , bench_append),
)
if __name__ == "__main__":
print ' '*16 + '\t'.join(map(lambda x: '%15s' % (x[0]), ops)) + ' total'
for impl_name, impl in implementations:
db=impl()
db.open()
db.clear()
print '%15s' % (impl_name),
total = 0.0
for op_name, op in ops:
bound = lambda:op(impl)
t = timeit.Timer(bound)
bound()#warmup
#t.timeit(number=100)#warmup
res = t.timeit(number=1000)
total += res
            print '%2.13f'%(res),
print '%2.13f'%(total)
'''
res = timeit.timeit("pykt_set()", "from __main__ import pykt_set", number=1000)
print "pykt_set %f" % res
#res = timeit.timeit("pykt_replace()", "from __main__ import pykt_replace", number=1000)
#print "pykt_replace %f" % res
res = timeit.timeit("kyoto_set()", "from __main__ import kyoto_set", number=1000)
print "kt_set %f" % res
#res = timeit.timeit("pykt_append()", "from __main__ import pykt_append", number=1000)
#print "pykt_append %f" % res
#res = timeit.timeit("kyoto_append()", "from __main__ import kyoto_append", number=1000)
#print "kt_append %f" % res
res = timeit.timeit("pykt_get()", "from __main__ import pykt_get", number=1000)
print "pykt_get %f" % res
res = timeit.timeit("kyoto_get()", "from __main__ import kyoto_get", number=1000)
print "kt_get %f" % res
res = timeit.timeit("pykt_gets()", "from __main__ import pykt_gets", number=100)
print "pykt_gets %f" % res
res = timeit.timeit("kyoto_gets()", "from __main__ import kyoto_gets", number=100)
print "kt_gets %f" % res
res = timeit.timeit("pykt_increment()", "from __main__ import pykt_increment", number=1000)
print "pykt_increment %f" % res
res = timeit.timeit("kyoto_increment()", "from __main__ import kyoto_increment", number=1000)
print "kt_increment %f" % res
'''
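# The measurement idiom above, in isolation (a sketch): warm up once outside
# the timed region, then report the mean per-call time; target() is a
# stand-in no-op, not one of the database operations.
def target():
    pass
_timer = timeit.Timer(target)
target()  # warmup
print '%2.13f s/call' % (_timer.timeit(number=1000) / 1000.0)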
|
dpaiton/OpenPV
|
pv-core/analysis/python/plot_hamming.py
|
Python
|
epl-1.0
| 3,742
| 0.031267
|
"""
Plots the Histogram
"""
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import PVReadWeights as rw
import PVConversions as conv
from pylab import save
import math
import random
if len(sys.argv) < 3:
print "usage: hamming filename value"
print len(sys.argv)
sys.exit()
w = rw.PVReadWeights(sys.argv[1])
space = 1
d = np.zeros((4,4))
wmax = w.max
nx = w.nx
ny = w.ny
nxp = w.nxp
nyp = w.nyp
numpat = w.numPatches
nf = w.nf
margin = 0
marginstart = margin
marginend = nx - margin
acount = 0
patchposition = []
supereasytest = 1
# create feature list for comparing weights from on and off cells
f = np.zeros(w.patchSize)
f2 = np.zeros(w.patchSize)
fe1 = []
fe2 = []
fe3 = []
fe4 = []
fe5 = []
fe6 = []
fe7 = []
fe8 = []
fe9 = []
fe10 = []
fcomp = []
f = w.normalize(f)
f2 = w.normalize(f2)
# vertical lines from right side
f = np.zeros([w.nxp, w.nyp]) # first line
f[:,0] = 1
fe1.append(f)
f = np.zeros([w.nxp, w.nyp]) # second line
f[:,1] = 1
fe2.append(f)
f2 = np.zeros([w.nxp, w.nyp]) # third line
f2[:,2] = 1
fe3.append(f2)
f = np.zeros([w.nxp, w.nyp])
f[:,3] = 1
fe4.append(f)
f = np.zeros([w.nxp, w.nyp])
f[:,4] = 1
fe5.append(f)
#horizontal lines from the top
f = np.zeros([w.nxp, w.nyp])
f[0,:] = 1
fe6.append(f)
f = np.zeros([w.nxp, w.nyp])
f[1,:] = 1
fe7.append(f)
f = np.zeros([w.nxp, w.nyp])
f[2,:] = 1
fe8.append(f)
f = np.zeros([w.nxp, w.nyp])
f[3,:] = 1
fe9.append(f)
f = np.zeros([w.nxp, w.nyp])
f[4,:] = 1
fe10.append(f)
#print "f1", fe1
#print "f2", fe2
#print "f3", fe3
#print "f4", fe4
#print "f5", fe5
#print "f6", fe6
#print "f7", fe7
#print "f8", fe8
#print "f9", fe9
#print "f10", fe10
fe1 = np.reshape(fe1, (25))
fe2 = np.reshape(fe2, (25))
fe3 = np.reshape(fe3, (25))
fe4 = np.reshape(fe4, (25))
fe5 = np.reshape(fe5, (25))
fe6 = np.reshape(fe6, (25))
fe7 = np.reshape(fe7, (25))
fe8 = np.reshape(fe8, (25))
fe9 = np.reshape(fe9, (25))
fe10 = np.reshape(fe10, (25))
def whatFeature(k):
result = []
fcomp = []
for i in range(len(k)):
if k[i] > (0.5 * wmax):
k[i] = 1
else:
k[i] = 0
diff1 = 0
diff2 = 0
diff3 = 0
diff4 = 0
diff5 = 0
diff6 = 0
diff7 = 0
diff8 = 0
diff9 = 0
diff10 = 0
for a, b in zip(k, fe1):
if a!=b:
diff1+=1
for a, b in zip(k, fe2):
if a!=b:
diff2+=1
for a, b in zip(k, fe3):
if a!=b:
diff3+=1
for a, b in zip(k, fe4):
if a!=b:
diff4+=1
for a, b in zip(k, fe5):
if a!=b:
diff5+=1
for a, b in zip(k, fe6):
if a!=b:
diff6+=1
for a, b in zip(k, fe7):
if a!=b:
diff7+=1
for a, b in zip(k, fe8):
if a!=b:
diff8+=1
for a, b in zip(k, fe9):
if a!=b:
diff9+=1
for a, b in zip(k, fe10):
if a!=b:
diff10+=1
dres = [diff1, diff2, diff3, diff4, diff5, diff6, diff7, diff8, diff9, diff10]
result = np.min(dres)
return result
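# whatFeature thresholds a patch at half the maximum weight and returns its
# minimum Hamming distance to the ten line features; the same computation,
# vectorized with numpy (a sketch, assuming the features stacked into one array):
feature_stack = np.vstack([fe1, fe2, fe3, fe4, fe5, fe6, fe7, fe8, fe9, fe10])
def whatFeatureVectorized(k):
    binary = (np.asarray(k) > 0.5 * wmax).astype(int)  # threshold the patch
    # Hamming distance is the count of differing entries in each feature row.
    return int(np.abs(feature_stack - binary).sum(axis=1).min())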
space = 1
w = rw.PVReadWeights(sys.argv[1])
coord = 1
coord = int(coord)
nx = w.nx
ny = w.ny
nxp = w.nxp
nyp = w.nyp
numpat = w.numPatches
nf = w.nf
margin = 0
start = margin
marginend = nx - margin
nx_im = nx * (nxp + space) + space
ny_im = ny * (nyp + space) + space
im = np.zeros((nx_im, ny_im))
im[:,:] = (w.max - w.min) / 2.
where = []
zep = 0
for k in range(numpat):
kx = conv.kxPos(k, nx, ny, nf)
ky = conv.kyPos(k, nx, ny, nf)
p = w.next_patch()
if len(p) != nxp * nyp:
continue
acount+=1
a = whatFeature(p)
zep += a
im = np.array([zep / float(acount)])
print zep
print acount
print im
np.savetxt('hamming-%s.txt' %(sys.argv[2]), im, fmt="%10.5f")
|
home-assistant/home-assistant
|
homeassistant/components/vesync/__init__.py
|
Python
|
apache-2.0
| 4,023
| 0.000746
|
"""VeSync integration."""
import logging
from pyvesync import VeSync
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, Platform
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .common import async_process_devices
from .const import (
DOMAIN,
SERVICE_UPDATE_DEVS,
VS_DISCOVERY,
VS_DISPATCHERS,
VS_FANS,
VS_LIGHTS,
VS_MANAGER,
VS_SWITCHES,
)
PLATFORMS = ["switch", "fan", "light"]
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = cv.removed(DOMAIN, raise_if_present=False)
async def async_setup_entry(hass, config_entry):
"""Set up Vesync as config entry."""
username = config_entry.data[CONF_USERNAME]
password = config_entry.data[CONF_PASSWORD]
time_zone = str(hass.config.time_zone)
manager = VeSync(username, password, time_zone)
login = await hass.async_add_executor_job(manager.login)
if not login:
_LOGGER.error("Unable to login to the VeSync server")
return False
device_dict = await async_process_devices(hass, manager)
forward_setup = hass.config_entries.async_forward_entry_setup
hass.data[DOMAIN] = {}
hass.data[DOMAIN][VS_MANAGER] = manager
switches = hass.data[DOMAIN][VS_SWITCHES] = []
fans = hass.data[DOMAIN][VS_FANS] = []
lights = hass.data[DOMAIN][VS_LIGHTS] = []
hass.data[DOMAIN][VS_DISPATCHERS] = []
if device_dict[VS_SWITCHES]:
switches.extend(device_dict[VS_SWITCHES])
hass.async_create_task(forward_setup(config_entry, Platform.SWITCH))
if device_dict[VS_FANS]:
fans.extend(device_dict[VS_FANS])
hass.async_create_task(forward_setup(config_entry, Platform.FAN))
if device_dict[VS_LIGHTS]:
lights.extend(device_dict[VS_LIGHTS])
hass.async_create_task(forward_setup(config_entry, Platform.LIGHT))
async def async_new_device_discovery(service):
"""Discover if new devices should be added."""
manager = hass.data[DOMAIN][VS_MANAGER]
switches = hass.data[DOMAIN][VS_SWITCHES]
fans = hass.data[DOMAIN][VS_FANS]
lights = hass.data[DOMAIN][VS_LIGHTS]
        dev_dict = await async_process_devices(hass, manager)
switch_devs = dev_dict.get(VS_SWITCHES, [])
fan_devs = dev_dict.get(VS_FANS, [])
light_devs = dev_dict.get(VS_LIGHTS, [])
switch_set = set(switch_devs)
new_switches = list(switch_set.difference(switches))
if new_switches and switches:
switches.extend(new_switches)
            async_dispatcher_send(hass, VS_DISCOVERY.format(VS_SWITCHES), new_switches)
return
if new_switches and not switches:
switches.extend(new_switches)
hass.async_create_task(forward_setup(config_entry, "switch"))
fan_set = set(fan_devs)
new_fans = list(fan_set.difference(fans))
if new_fans and fans:
fans.extend(new_fans)
async_dispatcher_send(hass, VS_DISCOVERY.format(VS_FANS), new_fans)
return
if new_fans and not fans:
fans.extend(new_fans)
hass.async_create_task(forward_setup(config_entry, "fan"))
light_set = set(light_devs)
new_lights = list(light_set.difference(lights))
if new_lights and lights:
lights.extend(new_lights)
async_dispatcher_send(hass, VS_DISCOVERY.format(VS_LIGHTS), new_lights)
return
if new_lights and not lights:
lights.extend(new_lights)
hass.async_create_task(forward_setup(config_entry, "light"))
hass.services.async_register(
DOMAIN, SERVICE_UPDATE_DEVS, async_new_device_discovery
)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
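# The discovery routine above repeats one idiom per device class: diff the
# freshly reported devices against the tracked list, then dispatch to loaded
# platforms or trigger a first-time setup. A minimal sketch (names illustrative):
def _find_new_devices(tracked, reported):
    """Return devices present in `reported` but not yet in `tracked`."""
    return list(set(reported).difference(tracked))
# _find_new_devices(["plug-kitchen"], ["plug-kitchen", "plug-garage"])
# -> ["plug-garage"]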
|
erja-gp/openthread
|
tools/harness-automation/cases/sed_9_2_13.py
|
Python
|
bsd-3-clause
| 1,871
| 0.001603
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class SED_9_2_13(HarnessCase):
role = HarnessCase.ROLE_SED
case = '9 2 13'
golden_devices_required = 5
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
|
timeu/PyGWAS
|
pygwas/core/plot.py
|
Python
|
mit
| 4,855
| 0.006591
|
import scipy as sp
import matplotlib
import matplotlib.pyplot as plt
def simple_log_qqplot(quantiles_list, png_file=None, pdf_file=None, quantile_labels=None, line_colors=None,
max_val=5, title=None, text=None, plot_label=None, ax=None, **kwargs):
storeFig = False
if ax is None:
f = plt.figure(figsize=(5.4, 5))
ax = f.add_axes([0.1, 0.09, 0.88, 0.86])
storeFig = True
ax.plot([0, max_val], [0, max_val], 'k--', alpha=0.5, linewidth=2.0)
num_dots = len(quantiles_list[0])
exp_quantiles = sp.arange(1, num_dots + 1, dtype='single') / (num_dots + 1) * max_val
for i, quantiles in enumerate(quantiles_list):
if line_colors:
c = line_colors[i]
else:
c = 'b'
if quantile_labels:
ax.plot(exp_quantiles, quantiles, label=quantile_labels[i], c=c, alpha=0.5, linewidth=2.2)
else:
ax.plot(exp_quantiles, quantiles, c=c, alpha=0.5, linewidth=2.2)
ax.set_ylabel("Observed $-log_{10}(p$-value$)$")
ax.set_xlabel("Expected $-log_{10}(p$-value$)$")
if title:
        ax.set_title(title)
max_x = max_val
max_y = max(map(max, quantiles_list))
ax.axis([-0.025 * max_x, 1.025 * max_x, -0.025 * max_y, 1.025 * max_y])
if quantile_labels:
fontProp = matplotlib.font_manager.FontProperties(size=10)
ax.legend(loc=2, numpoints=2, handlelength=0.05, markerscale=1, prop=fontProp, borderaxespad=0.018)
y_min, y_max = plt.ylim()
if text:
f.text(0.05 * max_val, y_max * 0.9, text)
if plot_label:
f.text(-0.138 * max_val, y_max * 1.01, plot_label, fontsize=14)
    if storeFig == False:
return
if png_file != None:
f.savefig(png_file)
if pdf_file != None:
f.savefig(pdf_file, format='pdf')
def simple_qqplot(quantiles_list, png_file=None, pdf_file=None, quantile_labels=None, line_colors=None,
title=None, text=None, ax=None, plot_label=None, **kwargs):
storeFig = False
if ax is None:
        f = plt.figure(figsize=(5.4, 5))
ax = f.add_axes([0.11, 0.09, 0.87, 0.86])
storeFig = True
ax.plot([0, 1], [0, 1], 'k--', alpha=0.5, linewidth=2.0)
num_dots = len(quantiles_list[0])
exp_quantiles = sp.arange(1, num_dots + 1, dtype='single') / (num_dots + 1)
for i, quantiles in enumerate(quantiles_list):
if line_colors:
c = line_colors[i]
else:
c = 'b'
if quantile_labels:
ax.plot(exp_quantiles, quantiles, label=quantile_labels[i], c=c, alpha=0.5, linewidth=2.2)
else:
ax.plot(exp_quantiles, quantiles, c=c, alpha=0.5, linewidth=2.2)
ax.set_ylabel("Observed $p$-value")
ax.set_xlabel("Expected $p$-value")
if title:
        ax.set_title(title)
ax.axis([-0.025, 1.025, -0.025, 1.025])
if quantile_labels:
fontProp = matplotlib.font_manager.FontProperties(size=10)
ax.legend(loc=2, numpoints=2, handlelength=0.05, markerscale=1, prop=fontProp, borderaxespad=0.018)
if text:
f.text(0.05, 0.9, text)
if plot_label:
f.text(-0.151, 1.04, plot_label, fontsize=14)
if storeFig == False:
return
if png_file != None:
f.savefig(png_file)
if pdf_file != None:
f.savefig(pdf_file, format='pdf')
def plot_simple_qqplots(png_file_prefix, results, result_labels=None, line_colors=None,
num_dots=1000, title=None, max_neg_log_val=5):
"""
Plots both log QQ-plots and normal QQ plots.
"""
qs = []
log_qs = []
for res in results:
pvals = res.snp_results['scores'][:]
qs.append(get_quantiles(pvals, num_dots))
log_qs.append(get_log_quantiles(pvals, num_dots, max_neg_log_val))
simple_qqplot(qs, png_file_prefix + '_qq.png', quantile_labels=result_labels,
line_colors=line_colors, num_dots=num_dots, title=title)
simple_log_qqplot(log_qs, png_file_prefix + '_log_qq.png', quantile_labels=result_labels,
line_colors=line_colors, num_dots=num_dots, title=title, max_val=max_neg_log_val)
def plot_simple_qqplots_pvals(png_file_prefix, pvals_list, result_labels=None, line_colors=None,
num_dots=1000, title=None, max_neg_log_val=5):
"""
Plots both log QQ-plots and normal QQ plots.
"""
qs = []
log_qs = []
for pvals in pvals_list:
qs.append(get_quantiles(pvals, num_dots))
log_qs.append(get_log_quantiles(pvals, num_dots, max_neg_log_val))
simple_qqplot(qs, png_file_prefix + '_qq.png', quantile_labels=result_labels,
line_colors=line_colors, num_dots=num_dots, title=title)
simple_log_qqplot(log_qs, png_file_prefix + '_log_qq.png', quantile_labels=result_labels,
line_colors=line_colors, num_dots=num_dots, title=title, max_val=max_neg_log_val)
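# get_quantiles and get_log_quantiles are called above but are not defined in
# this module as shown; the helpers below sketch plausible implementations
# with matching call signatures (an assumption, not the package's actual code).
def get_quantiles(scores, num_dots=1000):
    # num_dots evenly spaced empirical quantiles of the p-values, ascending.
    srt = sp.sort(scores)
    return [srt[int(len(srt) * i / float(num_dots + 1))]
            for i in range(1, num_dots + 1)]
def get_log_quantiles(scores, num_dots=1000, max_val=5):
    # The same idea on the -log10 scale, ascending and capped at max_val.
    neg_logs = sp.sort(-sp.log10(scores))
    return [min(neg_logs[int(len(neg_logs) * i / float(num_dots + 1))], max_val)
            for i in range(1, num_dots + 1)]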
|
sherpya/archiver
|
backend_xmlrpc.py
|
Python
|
gpl-2.0
| 2,693
| 0.00557
|
#!/usr/bin/env python
# -*- Mode: Python; tab-width: 4 -*-
#
# Netfarm Mail Archiver - release 2
#
# Copyright (C) 2005-2007 Gianluigi Tiesi <sherpya@netfarm.it>
# Copyright (C) 2005-2007 NetFarm S.r.l. [http://www.netfarm.it]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# ======================================================================
## @file backend_xmlrpc.py
## XMLrpc Storage and Archive Backend
__doc__ = '''Netfarm Archiver - release 2.1.0 - XmlRpc backend'''
__version__ = '2.1.0'
__all__ = [ 'Backend' ]
from archiver import *
from sys import exc_info
from xmlrpclib import ServerProxy, Error
from urlparse import urlparse
from time import mktime
_prefix = 'XmlRpc Backend: '
##
class BadUrlSyntax(Exception):
"""BadUrlSyntax Bad url syntax in config file"""
pass
class Backend(BackendBase):
"""XMLrpc Backend using python-xmlrpc
This backend can be used with a xmlrpc capable server like zope"""
def __init__(self, config, stage_type, ar_globals):
"""The constructor"""
self.config = config
self.type = stage_type
self.LOG = ar_globals['LOG']
try:
self.url = config.get(self.type, 'url')
self.method = config.get(self.type, 'method')
self.server = ServerProxy(self.url)
except:
raise BadConfig, 'Bad config in xmlrpc backend'
self.LOG(E_ALWAYS, 'XmlRpc Backend (%s) at %s' % (self.type, self.url))
def process(self, data):
"""Archive backend proces
@param data: The data argument is a dict containing mail info and the mail itself
@return: year as status and pid as code"""
## FIXME wrap with xmlrpc DateTime - time.struct_time objects cannot be marshalled
data['m_date'] = mktime(data['m_date'])
self.LOG(E_TRACE, 'XmlRpc Backend (%s): ready to process %s' % (self.type, data))
try:
getattr(self.server, self.method)({'data': data})
except Error, v:
del v ## FIXME Fill error
return 0, 443, 'Error'
return 0, 200, 'Ok'
def shutdown(self):
"""Backend Shutdown callback"""
self.LOG(E_ALWAYS, 'XmlRpc Backend (%s): closing connection' % self.type)
self.server = None
|
ross128/pybus
|
bus.py
|
Python
|
agpl-3.0
| 2,629
| 0.030049
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import zmq
import threading
import logging
import logging.handlers
import util
class Logger(threading.Thread):
"""logger for all messages and events"""
def __init__(self, stop_logging, filename='bus.log'):
super(Logger, self).__init__()
self.filename = filename
self.stop_logging = stop_logging
# receiving socket
self.context = zmq.Context.instance()
self.log_in = self.context.socket(zmq.PAIR)
        self.log_in.connect("inproc://logging")
self.log_in.setsockopt(zmq.RCVTIMEO, 1000)
# logger parameters for stdout and compressed file
log_format = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
file_log_handler = util.TimedCompressingRotatingFileHandler(self.filename, when='midnight', backupCount=7)
file_log_handler.setFormatter(log_format)
stream_log_handler = logging.StreamHandler()
stream_log_handler.setFormatter(log_format)
self.logger = logging.getLogger('logger')
self.logger.setLevel(logging.INFO)
self.logger.addHandler(file_log_handler)
self.logger.addHandler(stream_log_handler)
def run(self):
while not self.stop_logging.is_set():
try:
# receive message
message = self.log_in.recv_multipart()
if len(message) > 1:
# message with content
[topic, contents] = message
self.logger.info("[msg] {%s} %s", topic, contents)
else:
# subscribe/unsubscribe
message = message[0]
topic = message[1:]
if message.startswith(b'\x00'):
# unsubscribe
self.logger.info("[unsub] {%s}", topic)
elif message.startswith(b'\x01'):
# subscribe
self.logger.info("[sub] {%s}", topic)
else:
self.logger.warning("[unknown message] %s", message)
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
self.logger.error("socket error, stopped logging")
break
elif e.errno == zmq.EAGAIN:
pass
else:
print(e)
self.logger.error("unknown error occurred during logging")
def main():
context = zmq.Context.instance()
# socket facing clients
frontend = context.socket(zmq.XSUB)
frontend.bind("tcp://*:5559")
# socket facing services
backend = context.socket(zmq.XPUB)
backend.bind("tcp://*:5560")
# log socket
log_out = context.socket(zmq.PAIR)
log_out.bind("inproc://logging")
# start logging thread
stop_logging = threading.Event()
logger = Logger(stop_logging)
logger.start()
try:
zmq.proxy(frontend, backend, log_out)
except KeyboardInterrupt:
print("shutting down")
finally:
frontend.close()
backend.close()
stop_logging.set()
logger.join()
if __name__ == "__main__":
main()
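# A minimal client pair for this proxy (a sketch; ports taken from the
# bindings above, topic name illustrative). Messages published into the XSUB
# frontend come out of the XPUB backend and are also seen by the Logger thread.
def demo_client():
    import time
    ctx = zmq.Context.instance()
    pub = ctx.socket(zmq.PUB)
    pub.connect("tcp://localhost:5559")   # into the XSUB frontend
    sub = ctx.socket(zmq.SUB)
    sub.connect("tcp://localhost:5560")   # out of the XPUB backend
    sub.setsockopt(zmq.SUBSCRIBE, b"sensors")
    time.sleep(0.2)                       # let the subscription propagate
    pub.send_multipart([b"sensors", b"21.5"])
    print(sub.recv_multipart())           # [b'sensors', b'21.5']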
|
isawnyu/textnorm
|
textnorm/__init__.py
|
Python
|
agpl-3.0
| 4,388
| 0.000228
|
"""Normalize whitespace and Unicode forms in Python 3.
Functions:
normalize_space() -- collapse whitespace and trim
normalize_unicode() -- return specified Unicode normal form
"""
__author__ = 'Tom Elliott'
__copyright__ = 'Copyright © 2017 New York University'
__license__ = 'See LICENSE.txt'
__version__ = '0.3'
import logging
import sys
import unicodedata
def normalize_space(v: str, preserve: list = [], trim: bool = True):
"""Normalize space in a Unicode string.
Keyword arguments:
* v: the Unicode string to normalize
* preserve: a list of Unicode character strings to preserve instead of
treating them as whitespace (see tests for examples)
* trim: if True (default), strips whitespace at beginning and end of
string; if False, collapses whitespace at beginning and end
according to regular algorithm + preserve settings.
Returns the normalized Unicode string.
The function collapses all continuous runs of whitespace into a single
whitespace character unless one or more characters are found in the
"preserve" list. Characters found in the "preserve" list are maintained in
the output; however, other adjoining whitespace characters are still
eliminated. If "trim" is True, leading/trailing whitespace is eliminated
    entirely; otherwise it is treated the same as other whitespace
substrings.
"""
logger = logging.getLogger(sys._getframe().f_code.co_name)
logger.debug('v: {}'.format(repr(v)))
if len(preserve) == 0:
s = ' '.join(v.split())
else:
token = preserve[0]
normed = []
for chunk in v.split(token):
normed.append(normalize_space(chunk, preserve[1:]))
s = token.join(normed)
logger.debug('s: {}'.format(repr(s)))
if not trim:
if v != s:
first = ''
last = ''
chunks = v.split()
logger.debug('chunks: {}'.format(repr(chunks)))
vi = v.index(chunks[0])
si = s.index(chunks[0])
if si == 0 and v[0] != s[0]:
first = ' '
vi = v.rindex(chunks[-1]) + len(chunks[-1])
si = s.rindex(chunks[-1]) + len(chunks[-1])
            logger.debug('vi: {}'.format(repr(vi)))
logger.debug('si: {}'.format(repr(si)))
if si == len(s) and len(v) > vi:
last = ' '
s = first + s + last
return s
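# A quick illustration of the preserve behaviour (a sketch; the values are
# chosen here, not taken from the package's test suite):
#
#   normalize_space('  a   b  ')                -> 'a b'
#   normalize_space('a \n b', preserve=['\n'])  -> 'a\nb'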
def normalize_unicode(v: str, target='NFC', check_compatible=False):
"""Normalize Unicode form.
Keyword arguments:
* v: the Unicode string to normalize
* target: the targeted normalization form as a string expected by
unicodedata.normalize(). No checking is done on the value of
this argument.
* check_compatible: detect differences in canonical and compatibility
form results
Returns the normalized Unicode string.
Raises ValueError if check_compatible is True and the canonical and
compatibility forms differ.
This function wraps unicodedata.normalize from the standard library,
adding the optional compatibility check when appropriate.
"""
normalized = unicodedata.normalize(target, v)
if check_compatible:
if target == 'NFC':
compatibility_target = 'NFKC'
elif target == 'NFD':
compatibility_target = 'NFKD'
elif target == 'NFKC':
compatibility_target = 'NFC'
elif target == 'NFKD':
compatibility_target = 'NFD'
compatible = unicodedata.normalize(compatibility_target, v)
if normalized != compatible:
msg = (
'Unicode normalization may have changed the string "{}" in '
                'an undesirable way or may have failed to do so in a manner '
'desired. The {} normalized form '
'"{}" ({}) does not match the corresponding {} form '
'"{}" ({}).'
''.format(
v,
target,
normalized,
normalized.encode('ascii', 'namereplace'),
compatibility_target,
compatible,
compatible.encode('ascii', 'namereplace')))
raise ValueError(msg)
return normalized
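if __name__ == '__main__':
    # Compatibility-check illustration (a sketch): the ligature 'ﬁ' (U+FB01)
    # survives canonical NFC but folds to 'fi' under NFKC, so the check raises.
    print(normalize_unicode('ﬁle', 'NFC'))  # 'ﬁle', unchanged
    try:
        normalize_unicode('ﬁle', 'NFC', check_compatible=True)
    except ValueError:
        print('compatibility mismatch detected')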
|
RealTimeWeb/Blockpy-Server
|
static/programs/temperatures.py
|
Python
|
mit
| 634
| 0.012618
|
Plot the forecasted temperatures of Miami in Celsius. You'll need t
|
o use the "<a href='#'>create empty list</a>" and "<a href='#'>append</a>" blocks to create a new list of Celsius temperatures from the forecasted temperatures in Blacksburg, and then plot these new temperatures against the old ones.
#####
import weather
import matplotlib.pyplot as plt
celsius_temperatures = []
for t in weather.get_forecasts("Miami, FL"):
celsius = (t - 32) / 1.8
celsius
|
_temperatures.append(celsius)
plt.plot(celsius_temperatures)
plt.title("Temperatures in Miami")
plt.show()
#####
def on_run(code, output, properties):
return True
|
chenyyx/scikit-learn-doc-zh
|
examples/zh/applications/plot_prediction_latency.py
|
Python
|
gpl-3.0
| 11,475
| 0
|
"""
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
from sklearn.utils import shuffle
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[[i], :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
random_seed = 13
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=n_train, random_state=random_seed)
X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)
X_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
X_test = X_scaler.transform(X_test)
y_scaler = StandardScaler()
y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]
y_test = y_scaler.transform(y_test[:, None])[:, 0]
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
    configuration : benchmark configuration dict; its 'estimators' entries supply the box labels
pred_type : 'bulk' or 'atomic'
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
    ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
n_train : nber of training instances (int)
n_test : nber of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_tra
|
12019/pyscard
|
smartcard/Examples/wx/apdumanager/apdumanager.py
|
Python
|
lgpl-2.1
| 1,988
| 0.004527
|
#! /usr/bin/env python
"""
Simple application to send APDUs to a card.
__author__ = "http://www.gemalto.com"
Copyright 2001-2010 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import os
import sys
import os.path
from smartcard.wx.SimpleSCardApp import *
from SampleAPDUManagerPanel import SampleAPDUManagerPanel
def we_are_frozen():
"""Returns whether we are frozen via py2exe.
This will affect how we find out where we are located.
From WhereAmI page on py2exe wiki."""
return hasattr(sys, "frozen")
def module_path():
""" This will get us the program's directory,
    even if we are frozen using py2exe. From WhereAmI page on py2exe wiki."""
if we_are_frozen():
return os.path.dirname( unicode(sys.executable, sys.getfilesystemencoding( )) )
return os.path.dirname(unicode(__file__, sys.getfilesystemencoding( )))
def main(argv):
app = SimpleSCardApp(
appname='A tool to send apdu to a card',
apppanel=SampleAPDUManagerPanel,
appstyle=TR_SMARTCARD | TR_READER | PANEL_APDUTRACER,
appicon=os.path.join( module_path(), 'images', 'mysmartcard.ico'),
size=(800, 600))
app.MainLoop()
if __name__ == "__main__":
import sys
main(sys.argv)
|
homeworkprod/byceps
|
byceps/signals/shop.py
|
Python
|
bsd-3-clause
| 362
| 0
|
"""
byceps.signals.shop
~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from blinker import Namespace
shop_signals = Namespace()
order_placed = shop_signals.signal('order-placed')
order_canceled = shop_signals.signal('order-canceled')
order_paid = shop_signals.signal('order-paid')
|
tldavies/RackHD
|
test/tests/rackhd20/test_rackhd20_api_files.py
|
Python
|
apache-2.0
| 3,026
| 0.00727
|
'''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import os
import sys
import subprocess
# set path to common libraries
sys.path.append(subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip("\n") + "/test/common")
import fit_common
# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class rackhd20_api_files(fit_common.unittest.TestCase):
def setUp(self):
# create test file
TESTFILE = open(fit_common.TEST_PATH + 'testfile','w+')
TESTFILE.write("1234567890ABCDEF")
TESTFILE.close()
# delete any instance of testfile on host
api_data = fit_common.rackhdapi('/api/2.0/files')
for item in api_data['json']:
if item['filename'] == 'testfile':
fit_common.rackhdapi('/api/2.0/files/' + item['uuid'], action="delete")
def tearDown(self):
os.remove(fit_common.TEST_PATH + 'testfile')
def test_api_20_files_put_get_delete(self):
        # put file via files API, then check data
api_data = fit_common.rackhdapi('/api/2.0/files/testfile', action="binary-put", payload = file(fit_common.TEST_PATH + 'testfile').read())
self.assertEqual(api_data['status'], 201, 'Incorrect HTTP return code, expected 201, got:' + str(api_data['status']))
# Retrieve file
fileid = ""
api_data = fit_common.rackhdapi('/api/2.0/files')
for item in api_data['json']:
if item['filename'] == 'testfile':
fileid = item['uuid']
        api_data = fit_common.rackhdapi('/api/2.0/files/' + fileid)
        self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
self.assertEqual(open(fit_common.TEST_PATH + 'testfile').read(), api_data['text'], 'File corrupted, ID: ')
# list all
api_data = fit_common.rackhdapi('/api/2.0/files')
self.assertIn(fileid, fit_common.json.dumps(api_data['json']), 'File ID missing in file list.')
# check md5
api_data = fit_common.rackhdapi('/api/2.0/files/testfile/md5')
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
# check metadata
api_data = fit_common.rackhdapi('/api/2.0/files/testfile/metadata')
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
for item in ['md5', 'name', 'sha256', 'uuid']:
if fit_common.VERBOSITY >= 2:
print "Checking:", item
self.assertGreater(len(api_data['json'][item]), 0, item + ' field error')
# delete file
api_data = fit_common.rackhdapi('/api/2.0/files/' + fileid, action='delete')
self.assertEqual(api_data['status'], 204, 'Incorrect HTTP return code, expected 204, got:' + str(api_data['status']))
if __name__ == '__main__':
fit_common.unittest.main()
|
pfederl/CARTAvis
|
carta/scriptedClient/layer2.py
|
Python
|
gpl-2.0
| 2,551
| 0.001176
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import struct
from layer1 import VarLenMessage, VarLenSocket
class TagMessage:
"""
TagMessage contains a string tag and string data.
    Parameters
---------
tag: string
The type of the message. Currently supported tags are "json" and
"async".
data: string
The data content of the message.
"""
def __init__(self, tag, data):
self.tag = tag
self.data = data
def toVarLenMessage(self):
"""
Convert a TagMessage to a VarLenMessage
Returns
-------
VarLenMessage
A VarLenMessage representation of this TagMessage.
"""
binData = bytearray(self.tag)
binData.append(0)
binData.extend(self.data)
return VarLenMessage(binData)
@staticmethod
def fromVarLenMessage(vlm):
"""
Create a TagMessage from a VarLenMessage.
Parameters
----------
vlm: VarLenMessage
The message to convert to a TagMessage.
Returns
-------
TagMessage
            A TagMessage representation of the input VarLenMessage.
"""
        # find the position of the '\0'
end = vlm.data.find('\0')
if end < 0:
raise NameError('received tag message has no null char')
# extract the tag (null terminated string)
fmt = "{0}s".format(end)
tag = struct.unpack_from(fmt, vlm.data)[0]
# the data is the rest of the message
data = vlm.data[end+1:]
# return the tag message
return TagMessage(tag,data)
class TagMessageSocket:
"""
A socket wrapper that allows sending and receiving of TagMessages.
Parameters
----------
rawSocket: socket
"""
def __init__(self, rawSocket):
self.varLenSocket = VarLenSocket(rawSocket)
def send(self, tagMessage):
"""
Send a TagMessage by converting it to a VarLenMessage.
Parameters
----------
tagMessage: TagMessage
The message to send.
"""
self.varLenSocket.send(tagMessage.toVarLenMessage())
def receive(self):
"""
Receive a TagMessage.
Returns
-------
TagMessage
A TagMessage that has been converted from a VarLenMessage.
"""
# get the VarLenMessage
vlm = self.varLenSocket.receive()
# convert the VarLenMessage to TagMessage
return TagMessage.fromVarLenMessage(vlm)
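# Round-trip sketch of the wire format (tag, a NUL byte, then the payload);
# the tag and payload values are illustrative.
if __name__ == '__main__':
    tm = TagMessage("json", '{"cmd": "ping"}')
    back = TagMessage.fromVarLenMessage(tm.toVarLenMessage())
    assert back.tag == "json"
    print back.tag, back.data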
|
tomwhartung/edison_python
|
my_examples/02.Digital/08-buttonTurnsOnLed.py
|
Python
|
gpl-3.0
| 672
| 0.040179
|
#!/usr/bin/python
#
# 08-buttonTurnsOnLed.py: when the button is pressed, turn on the led
# -------------------------------------------------------------------
#
import mraa
import time
LOW = 0
HIGH = 1
digitalInPin = 8
digitalInGpio = mraa.Gpio( digitalInPin )
ledOutPin = 3
ledOutGpio = mraa.Gpio( ledOutPin )
ledOutGpio.dir(mraa.DIR_OUT)
##
# loop: what to do "forever"
#
def loop() :
digitalInInteger = digitalInGpio.read()
print( 'digitalInInteger: ' + str(digitalInInteger) )
if( digitalInInteger == 1 ) :
ledOutGpio.write( HIGH )
else :
ledOutGpio.write( LOW )
#
# mainline loop:
#
sleepSecs = 0.5
while True:
loop()
    time.sleep( sleepSecs )
exit(0)
|