repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
|---|---|---|---|---|---|---|---|---|
saurabh6790/frappe
|
frappe/build.py
|
Python
|
mit
| 14,371
| 0.025277
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import os
import re
import json
import shutil
import subprocess
from tempfile import mkdtemp, mktemp
from distutils.spawn import find_executable
import frappe
from frappe.utils.minify import JavascriptMinify
import click
import psutil
from urllib.parse import urlparse
from simple_chalk import green
from semantic_version import Version
timestamps = {}
app_paths = None
sites_path = os.path.abspath(os.getcwd())
def download_file(url, prefix):
from requests import get
filename = urlparse(url).path.split("/")[-1]
local_filename = os.path.join(prefix, filename)
with get(url, stream=True, allow_redirects=True) as r:
r.raise_for_status()
with open(local_filename, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
return local_filename
def build_missing_files():
'''Check which files dont exist yet from the assets.json and run build for those files'''
missing_assets = []
current_asset_files = []
for type in ["css", "js"]:
folder = os.path.join(sites_path, "assets", "frappe", "dist", type)
current_asset_files.extend(os.listdir(folder))
development = frappe.local.conf.developer_mode or frappe.local.dev_server
build_mode = "development" if development else "production"
assets_json = frappe.read_file("assets/assets.json")
if assets_json:
assets_json = frappe.parse_json(assets_json)
for bundle_file, output_file in assets_json.items():
if not output_file.startswith('/assets/frappe'):
continue
if os.path.basename(output_file) not in current_asset_files:
missing_assets.append(bundle_file)
if missing_assets:
click.secho("\nBuilding missing assets...\n", fg="yellow")
files_to_build = ["frappe/" + name for name in missing_assets]
bundle(build_mode, files=files_to_build)
else:
# no assets.json, run full build
bundle(build_mode, apps="frappe")
def get_assets_link(frappe_head):
from subprocess import getoutput
from requests import head
tag = getoutput(
r"cd ../apps/frappe && git show-ref --tags -d | grep %s | sed -e 's,.*"
r" refs/tags/,,' -e 's/\^{}//'"
% frappe_head
)
if tag:
# if tag exists, download assets from github release
url = "https://github.com/frappe/frappe/releases/download/{0}/assets.tar.gz".format(tag)
else:
url = "http://assets.frappeframework.com/{0}.tar.gz".format(frappe_head)
if not head(url):
raise ValueError("URL {0} doesn't exist".format(url))
return url
def download_frappe_assets(verbose=True):
"""Downloads and sets up Frappe assets if they exist based on the current
commit HEAD.
Returns True if correctly setup else returns False.
"""
from subprocess import getoutput
assets_setup = False
frappe_head = getoutput("cd ../apps/frappe && git rev-parse HEAD")
if frappe_head:
try:
url = get_assets_link(frappe_head)
click.secho("Retrieving assets...", fg="yellow")
prefix = mkdtemp(prefix="frappe-assets-", suffix=frappe_head)
assets_archive = download_file(url, prefix)
print("\n{0} Downloaded Frappe assets from {1}".format(green('✔'), url))
if assets_archive:
import tarfile
directories_created = set()
|
click.secho("\nExtracting assets...\n", fg="yellow")
with tarfile.open(assets_archive) as tar:
for file in tar:
if not file.isdir():
dest = "." + file.name.replace("./frappe-bench/sites", "")
asset_directory = os.path.dirname
|
(dest)
show = dest.replace("./assets/", "")
if asset_directory not in directories_created:
if not os.path.exists(asset_directory):
os.makedirs(asset_directory, exist_ok=True)
directories_created.add(asset_directory)
tar.makefile(file, dest)
print("{0} Restored {1}".format(green('✔'), show))
build_missing_files()
return True
else:
raise
except Exception:
# TODO: log traceback in bench.log
click.secho("An Error occurred while downloading assets...", fg="red")
assets_setup = False
finally:
try:
shutil.rmtree(os.path.dirname(assets_archive))
except Exception:
pass
return assets_setup
def symlink(target, link_name, overwrite=False):
"""
Create a symbolic link named link_name pointing to target.
If link_name exists then FileExistsError is raised, unless overwrite=True.
When trying to overwrite a directory, IsADirectoryError is raised.
Source: https://stackoverflow.com/a/55742015/10309266
"""
if not overwrite:
return os.symlink(target, link_name)
# os.replace() may fail if files are on different filesystems
link_dir = os.path.dirname(link_name)
# Create link to target with temporary filename
while True:
temp_link_name = mktemp(dir=link_dir)
# os.* functions mimic as closely as possible system functions
# The POSIX symlink() returns EEXIST if link_name already exists
# https://pubs.opengroup.org/onlinepubs/9699919799/functions/symlink.html
try:
os.symlink(target, temp_link_name)
break
except FileExistsError:
pass
# Replace link_name with temp_link_name
try:
# Pre-empt os.replace on a directory with a nicer message
if os.path.isdir(link_name):
raise IsADirectoryError("Cannot symlink over existing directory: '{}'".format(link_name))
try:
os.replace(temp_link_name, link_name)
except AttributeError:
os.renames(temp_link_name, link_name)
except:
if os.path.islink(temp_link_name):
os.remove(temp_link_name)
raise
def setup():
global app_paths, assets_path
pymodules = []
for app in frappe.get_all_apps(True):
try:
pymodules.append(frappe.get_module(app))
except ImportError:
pass
app_paths = [os.path.dirname(pymodule.__file__) for pymodule in pymodules]
assets_path = os.path.join(frappe.local.sites_path, "assets")
def bundle(mode, apps=None, hard_link=False, make_copy=False, restore=False, verbose=False, skip_frappe=False, files=None):
"""concat / minify js files"""
setup()
make_asset_dirs(hard_link=hard_link)
mode = "production" if mode == "production" else "build"
command = "yarn run {mode}".format(mode=mode)
if apps:
command += " --apps {apps}".format(apps=apps)
if skip_frappe:
command += " --skip_frappe"
if files:
command += " --files {files}".format(files=','.join(files))
command += " --run-build-command"
check_node_executable()
frappe_app_path = frappe.get_app_path("frappe", "..")
frappe.commands.popen(command, cwd=frappe_app_path, env=get_node_env())
def watch(apps=None):
"""watch and rebuild if necessary"""
setup()
command = "yarn run watch"
if apps:
command += " --apps {apps}".format(apps=apps)
check_node_executable()
frappe_app_path = frappe.get_app_path("frappe", "..")
frappe.commands.popen(command, cwd=frappe_app_path, env=get_node_env())
def check_node_executable():
node_version = Version(subprocess.getoutput('node -v')[1:])
warn = '⚠️ '
if node_version.major < 14:
click.echo(f"{warn} Please update your node version to 14")
if not find_executable("yarn"):
click.echo(f"{warn} Please install yarn using below command and try again.\nnpm install -g yarn")
click.echo()
def get_node_env():
node_env = {
"NODE_OPTIONS": f"--max_old_space_size={get_safe_max_old_space_size()}"
}
return node_env
def get_safe_max_old_space_size():
safe_max_old_space_size = 0
try:
total_memory = psutil.virtual_memory().total / (1024 * 1024)
# reference for the safe limit assumption
# https://nodejs.org/api/cli.html#cli_max_old_space_size_size_in_megabytes
# set minimum value 1GB
safe_max_old_space_size = max(1024, int(total_memory * 0.75))
except Exception:
pass
return safe_max_old_space_size
def generate_assets_map():
symlinks = {}
for app_name in frappe.get_all_apps():
app_doc_path = None
pymodule = frappe.get_module(app_name)
app_base_path = os.path.abspath(os.path.dirname(pymodule.__file__))
app_public_path = os.path.join(app_base_path, "public")
app_node_modules_path = os.path.join(app_base_path, "..", "node_modules")
app_docs_path = os.path.join(app_base_path, "docs")
app_www_docs_path = os.path.join(app_base_path, "www", "docs")
app_assets = os.path.abspath(app_pub
|
greenape/disclosure-game
|
python/disclosuregame/disclosuregame/Agents/__init__.py
|
Python
|
mpl-2.0
| 81
| 0.024691
|
__all__ =
|
["bayes", "cpt", "heuristic", "payoff", "recognition", "rl", "sharin
|
g"]
|
beefoo/still-i-rise
|
sort_audio.py
|
Python
|
mit
| 3,221
| 0.003726
|
# -*- coding: utf-8 -*-
# Description: sort audio clips by intensity, frequency, or duration; outputs .csv file for use in sequence.ck via ChucK
# python sort_audio.py -sort syllables
# python sort_audio.py -sort syllables -by frequency
# python sort_audio.py -sort syllables -by duration -fixed 0
import argparse
import csv
import json
import os
from pprint import pprint
import sys
import time
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="data/still_i_rise.json", help="Path to input aligned transcript json file")
parser.add_argument('-out', dest="OUTPUT_FILE", default="data/ck_sequence.csv", help="Path to output csv sequence file")
parser.add_argument('-sort', dest="SORT_FIELD", default="words", help="Field to sort: syllables, words, or lines")
parser.add_argument('-by', dest="SORT_BY", default="intensity", help="Feature to sort by: intensity, frequency, or duration")
parser.add_argument('-dir', dest="SORT_DIRECTION", type=int, default=1, help="Sort direction: -1 or 1")
parser.add_argument('-overlap', dest="OVERLAP_MS", type=int, default=50, help="Amount of ms to overlap in clip")
parser.add_argument('-fixed', dest="FIXED_MS", type=int, default=200, help="Fixed ms to play each sound clip; set to 0 to disable")
parser.add_argument('-cd', dest="CLIP_DIR", default="clips/", help="Path to clip directory")
parser.add_argument('-fe', dest="FILE_EXT", default=".wav", help="File extension of audio clips")
# init input
args = parser.parse_args()
SORT_FIELD = args.SORT_FIELD
SORT_BY = args.SORT_BY
SORT_DIRECTION = args.SORT_DIRECTION
OVERLAP_MS = args.OVERLAP_MS
FIXED_MS = args.FIXED_MS
CLIP_DIR = args.CLIP_DIR
FILE_EXT = args.FILE_EXT
data = {}
with open(args.INPUT_FILE) as f:
data = json.load(f)
# populate clips
clips = []
if SORT_FIELD in data:
clips = data[SORT_FIELD]
if SORT_FIELD=="syllables":
for wor
|
d in data["words"]:
clips += word["syllables"]
# add duration
for i, clip in enumerate(clips):
clips[i]["duration"] = clip["end"] - clip["start"]
# sort clips
clips = sorted(clips, key=lambda c: SORT_DIRECTION * c[SORT_BY])
# generate a sequence
sequence = []
ms = 0
for clip in clips:
dur =
|
int(clip["duration"] * 1000)
filename = CLIP_DIR + SORT_FIELD + "/" + clip["name"] + FILE_EXT
if os.path.isfile(filename):
sequence.append({
"elapsed_ms": ms,
"gain": 1.0,
"file": filename
})
else:
print "%s not found" % filename
if FIXED_MS > 0:
ms += FIXED_MS
else:
ms += dur - OVERLAP_MS
ms = max(0, ms)
print "Total time: %s" % time.strftime('%M:%S', time.gmtime(ms/1000))
# Add milliseconds to sequence
elapsed = 0
for i, step in enumerate(sequence):
sequence[i]['milliseconds'] = step['elapsed_ms'] - elapsed
elapsed = step['elapsed_ms']
# Write sequence
with open(args.OUTPUT_FILE, 'wb') as f:
w = csv.writer(f)
for step in sequence:
w.writerow([step['file']])
w.writerow([step['gain']])
w.writerow([step['milliseconds']])
f.seek(-2, os.SEEK_END) # remove newline
f.truncate()
print "Successfully wrote sequence to file: %s" % args.OUTPUT_FILE
|
Amyantis/SocialNewspaper
|
ArticleManagement/urls.py
|
Python
|
gpl-3.0
| 1,336
| 0.001497
|
"""SocialNewspaper URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Cl
|
ass-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.ur
|
ls'))
"""
from django.conf.urls import url
from django.contrib import admin
from ArticleManagement import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^share_article/$', views.share_article, name="share_article"),
url(r'^print_sharing/(?P<article_id>[0-9]+)$', views.print_sharing, name="print_sharing"),
url(r'^insert_article/$', views.insert_article, name="insert_article"),
url(r'^add_interesting/(?P<article_id>[0-9]+)$', views.add_interesting, name="add_interesting"),
url(r'^print_articles/$', views.print_articles, name="print_articles"),
url(r'^editorial/$', views.editorial, name="editorial"),
url(r'^$', views.editorial, name="home")
]
|
mhellmic/davix
|
test/pywebdav/lib/AuthServer.py
|
Python
|
lgpl-2.1
| 3,349
| 0
|
"""Authenticating HTTP Server
This module builds on BaseHTTPServer and implements basic authentication
"""
import base64
import binascii
import BaseHTTPServer
DEFAULT_AUTH_ERROR_MESSAGE = """
<head>
<title>%(code)s - %(message)s</title>
</head>
<body>
<h1>Authorization Required</h1>
this server could not verify that you
are authorized to access the document
requested. Either you supplied the wrong
credentials (e.g., bad password), or your
browser doesn't understand how to supply
the credentials required.
</body>"""
def _quote_html(html):
return html.replace("&", "&").replace("<", "<").replace(">", ">")
class AuthRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""
Simple handler that can check for auth headers
In your subclass you have to define the method get_userinfo(user, password)
which should return 1 or None depending on whether the password was
ok or not. None means that the user is not authorized.
"""
# False means no authentiation
DO_AUTH = 1
def parse_request(self):
if not BaseHTTPServer.BaseHTTPRequestHandler.parse_request(self):
return False
if self.DO_AUTH:
authorization = self.headers.get('Authorization', '')
if not authorization:
self.send_autherror(401, "Authorization Required")
return False
scheme, credentials = authorization.split()
if scheme != 'Basic':
self.send_error(501)
return False
credentials = base64.decodestring(credentials)
user, password = c
|
redentials.split(':', 2)
if not self.get_userinfo(user, password, self.command):
self.send_autherro
|
r(401, "Authorization Required")
return False
return True
def send_autherror(self, code, message=None):
"""Send and log an auth error reply.
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
short, long = self.responses[code]
except KeyError:
short, long = '???', '???'
if message is None:
message = short
explain = long
self.log_error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug
# #1100201)
content = (self.error_auth_message_format % {'code': code, 'message':
_quote_html(message), 'explain': explain})
self.send_response(code, message)
self.send_header('Content-Type', self.error_content_type)
self.send_header('WWW-Authenticate', 'Basic realm="PyWebDAV"')
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(content)
error_auth_message_format = DEFAULT_AUTH_ERROR_MESSAGE
def get_userinfo(self, user, password, command):
"""Checks if the given user and the given
password are allowed to access.
"""
# Always reject
return None
|
lyw07/kolibri
|
kolibri/core/auth/test/test_datasets.py
|
Python
|
mit
| 2,905
| 0.000688
|
"""
Tests related specifically to the FacilityDataset model.
"""
from django.db.utils import IntegrityError
from django.test import TestCase
from ..models import Classroom
from ..models import Facility
from ..models import FacilityDataset
from ..models import FacilityUser
from ..models import LearnerGroup
class FacilityDatasetTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create()
self.classroom = Classroom.objects.create(parent=self.facility)
self.learner_group = LearnerGroup.objects.create(parent=self.classroom)
self.facility_user = FacilityUser.objects.create(
username="blah", password="#", facility=self.facility
)
def test_datasets_equal(self):
self.assertTrue(self.facility.dataset is not None)
self.assertEqual(self.facility.dataset, self.classroom.dataset)
self.assertEqual(self.classroom.dataset, self.learner_group.dataset)
self.assertEqual(self.learner_group.dataset, self.facility_user.dataset)
def test_cannot_create_role_across_datasets(self):
facility2 = Facility.objects.create()
with self.assertRaises(IntegrityError):
facility2.add_admin(self.facility_user)
def test_cannot_create_membership_across_datasets(self):
facility2 = Facility.objects.create()
facility_user2 = FacilityUser.objects.create(
username="blah", password="#", facility=facility2
)
with self.assertRaises(IntegrityError):
self.learner_group.add_learner(facility_user2)
def test_cannot_pass_inappropriate_dataset(self):
facility2 = Facility.objects.create()
with self.assertRaises(IntegrityError):
FacilityUser.objects.create(
facility=self.facility, dataset=facility2.dataset
)
def test_cannot_change_dataset(self):
facility2 = Facility.objects.create()
self.facility_user.dataset = facility2.dataset
with self.assertRaises(IntegrityError):
self.facility_user.save()
def test_cannot_change_facility(self):
facility2 = Facility.objects.create()
self.facility_user.facility = facility2
with self.assertRaises(IntegrityError):
self.facility_user.save()
|
def test_manually_passing_dataset_for_new_facility(self):
dataset = FacilityDataset.objects.create()
facility = Facility(name="blah", dataset=dataset)
facility.full_clean()
facilit
|
y.save()
self.assertEqual(dataset, facility.dataset)
def test_dataset_representation(self):
self.assertEqual(
str(self.facility.dataset),
"FacilityDataset for {}".format(self.facility.name),
)
new_dataset = FacilityDataset.objects.create()
self.assertEqual(str(new_dataset), "FacilityDataset (no associated Facility)")
|
Brazelton-Lab/lab_scripts
|
esom_tracer2.py
|
Python
|
gpl-2.0
| 12,317
| 0.000568
|
#!/usr/bin/env python
"""Color ESOM data points by Phylogeny
Usage:
esom_tracer2.py [--bam] [--names] [--taxonomy] [--taxa_level] [--output]
Synopsis:
Takes alignment data from short reads mapped to an assembly in BAM format
and the phylogeny of those short reads from the Phylosift
sequence_taxa_summary.txt file to identify and color the phylogeny of each
contig in the assembly.
Copyright:
esom_tracer2.py Color ESOM best matches by phylogenies
Copyright (C) 2016 William Brazelton, Alex Hyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
import argparse
import bz2
from collections import defaultdict
import colorsys
import gzip
import pysam
import string
import sys
import zipfile
__author__ = 'Alex Hyer'
__version__ = '1.3.0'
class Contig:
"""Stores phylogenetic and ESOM data from short reads for a contig"""
def __init__(self, contig_id):
self.name = contig_id
self.taxa_dict = defaultdict(float)
self.chunk_numbers = []
self.class_number = None
def add_chunk_numbers(self, chunk_numbers):
"""Assign ESOM chunk numbers to contig
:param chunk_number: NAMES file number of chunk for contig
:type chunk_number: int or list of ints
"""
if type(chunk_numbers) is int:
chunk_numbers = [chunk_numbers]
self.chunk_numbers += chunk_numbers
def assign_class_number(self, class_number):
"""Assign the contig to an ESOM class
:param class_number: the ESOM class to assign the contig to
:type class_number: int
"""
self.class_number = class_number
def add_taxa_data(self, taxa_name, prob_mass):
"""Add Phylosift short read data to contig information
Note: all taxa given are assumed to be at the same taxonomic level
:param taxa_name: The taxa the short read is associated with
:type taxa_name: str
:param prob_mass: The Phylosift probability mass for the read
"type prob_mass: float
"""
self.taxa_dict[taxa_name] += prob_mass
def best_taxa(self):
"""Identify the most probable taxa for the contig and return it
:returns: most probable taxa for the contig
:rtype: str
"""
try:
taxa = max(self.taxa_dict.iteritems(), key=lambda x: x[1])[0]
except ValueError:
taxa = None
return taxa
def possible_taxa(self):
"""Returns all possible taxa for contig
:returns: all possible taxa for contig
:rtype: view
"""
return self.taxa_dict.keys()
def names_dict(names_handle):
"""Returns nested dictionary of NAMES file
:returns: Dictionary as structured below
:rtype: dict
:param names_handle: file handle to NAMES file
:type names_handle: File Object
Dictionary Structure (YAML format)
----------------------------------
contig_name:
contig_chunk: chunk_number
"""
temp_dict = defaultdict(dict)
names_handle.readline()
for line in names_handle:
columns = line.strip().split('\t')
name = '-'.join(columns[2].split('_')[0:2]).strip()
temp_dict[name][columns[1]] = columns[0]
return temp_dict
def rainbow_picker(scale):
"""Generates rainbow RGB values
:returns: [scale] number of RGB tuples
:rtype: list
:param scale: n
|
umber of RGB values to generate
:type scale: int
"""
hsv_tuples = [(float(i) / float(scale), 1.0, 1.0) for i in range(scale)]
rgb_tuples = map(lambda x: tuple(i * 255 for i in \
colorsys.hsv_to_rgb(*x)), hsv_tuples)
return rgb_tuples
def taxa_dict(taxa_handle):
"""Returns nested dictionary of sequence_taxa_summary.txt
:returns: Dictionary as structured below
:rtype: dict
:param taxa_handle: file handle to sequence_taxa_summary.txt
|
:type taxa_handle: File Object
Dictionary Structure (YAML format)
----------------------------------
short_read_name:
taxa_level: [taxa_name,probability_mass]
"""
temp_dict = defaultdict(dict)
taxa_handle.readline()
for line in taxa_handle:
columns = line.strip().split('\t')
temp_dict[columns[0].strip().split()[0]][columns[3]] = [columns[4], columns[5]]
return temp_dict
def x_reader(file_name):
"""Detect compression type and return appropriate file handle
:returns: A file handle depending on file type
:rtype: File Handle
:param file_name: Name of file to open
:type file_name: str
Supports GZIP, BZIP2, and ZIP compressed files,
returns a normal file handle if file isn't compressed.
"""
supported_files = {
'gz': gzip.open,
'bz2': bz2.BZ2File,
'zip': zipfile.ZipFile.open
}
last_ext = file_name.split('.')[-1]
if last_ext in supported_files:
return supported_files[last_ext](file_name, 'rU')
else:
return open(file_name, 'rU')
def main(args):
print(' '.join(sys.argv[:]))
# Instantiate each contig and assign chunk numbers
print('> Processing {0}'.format(args.names.name))
names = names_dict(args.names)
args.names.close()
print('> Processed {0} unique contigs from {1}'.format(str(len(names)),
args.names.name))
contigs = defaultdict(dict)
for name in names:
contigs[name] = Contig(name)
chunk_numbers = [int(names[name][chunk]) for chunk in names[name]]
contigs[name].add_chunk_numbers(chunk_numbers)
# Add taxonomy data to Contig based on what short reads map to them
print('> Processing {0}'.format(args.taxonomy.name))
taxa = taxa_dict(args.taxonomy)
args.taxonomy.close()
print('> Processed {0} short reads from {1}'.format(str(len(taxa)),
args.taxonomy.name))
unique_taxa = {'N/A': 1}
unique_taxa_number = 2
print('> Processing {0}'.format(args.bam.filename))
references_match_contigs = 0
reads_mapping_contigs = 0
mapped_taxa_reads = 0
for reference in args.bam.references:
if reference in contigs:
references_match_contigs += 1
for read in args.bam.fetch(reference=reference):
reads_mapping_contigs += 1
read_name = read.query_name
if read_name in taxa and args.taxa_level in taxa[read_name]:
mapped_taxa_reads += 1
taxa_name = taxa[read_name][args.taxa_level][0]
prob_mass = float(taxa[read_name][args.taxa_level][1])
contigs[reference].add_taxa_data(taxa_name, prob_mass)
if taxa_name not in unique_taxa:
unique_taxa[taxa_name] = unique_taxa_number
unique_taxa_number += 1
args.bam.close()
print('> {0} contigs in {1} matched contigs in {2}'.format(
str(references_match_contigs),
args.bam.filename,
args.names.name))
print('> {0} reads from {1} map to contigs in {2}'.format(
str(reads_mapping_contigs),
args.bam.filename,
args.names.name))
print('> {0} reads from {1} map to contigs in {2} and have assigned '
'taxa from {3} at the level {4}'.format(str(mapped_taxa_reads),
args.bam.filename,
|
xenobyter/xbWeatherSocket
|
SocketFrame.py
|
Python
|
isc
| 3,029
| 0.002311
|
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.6.8 on Thu Apr 2 20:01:32 2015
#
import wx
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class SocketFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: SocketFrame.__init__
kwds["style"] = wx.DEFAULT
|
_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.button_aquarium = wx.ToggleButton(self, wx.ID_ANY, "Aquarium")
self.button_kitchen = wx.ToggleButton(self, wx.ID_ANY, u"K\xfcche")
sel
|
f.button_bedroom = wx.ToggleButton(self, wx.ID_ANY, "Schlafstube")
self.button_back = wx.Button(self, wx.ID_ANY, u"Zur\xfcck")
self.button_livingroom = wx.ToggleButton(self, wx.ID_ANY, "Wohnstube")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnBtnAquarium, self.button_aquarium)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnBtnKitchen, self.button_kitchen)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnBtnBedroom, self.button_bedroom)
self.Bind(wx.EVT_BUTTON, self.OnBtnBack, self.button_back)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnBtnLivingroom, self.button_livingroom)
# end wxGlade
def __set_properties(self):
# begin wxGlade: SocketFrame.__set_properties
self.SetTitle("frame_1")
self.SetSize((483, 273))
# end wxGlade
def __do_layout(self):
# begin wxGlade: SocketFrame.__do_layout
grid_sizer_2 = wx.GridSizer(2, 3, 0, 0)
grid_sizer_2.Add(self.button_aquarium, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
grid_sizer_2.Add(self.button_kitchen, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
grid_sizer_2.Add(self.button_bedroom, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
grid_sizer_2.Add(self.button_back, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
grid_sizer_2.Add(self.button_livingroom, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
self.SetSizer(grid_sizer_2)
self.Layout()
# end wxGlade
def OnBtnAquarium(self, event): # wxGlade: SocketFrame.<event_handler>
print "Event handler 'OnBtnAquarium' not implemented!"
event.Skip()
def OnBtnKitchen(self, event): # wxGlade: SocketFrame.<event_handler>
print "Event handler 'OnBtnKitchen' not implemented!"
event.Skip()
def OnBtnBedroom(self, event): # wxGlade: SocketFrame.<event_handler>
print "Event handler 'OnBtnBedroom' not implemented!"
event.Skip()
def OnBtnBack(self, event): # wxGlade: SocketFrame.<event_handler>
print "Event handler 'OnBtnBack' not implemented!"
event.Skip()
def OnBtnLivingroom(self, event): # wxGlade: SocketFrame.<event_handler>
print "Event handler 'OnBtnLivingroom' not implemented!"
event.Skip()
# end of class SocketFrame
|
atagar/ReviewBoard
|
reviewboard/accounts/tests.py
|
Python
|
mit
| 2,192
| 0
|
from django.contrib.auth.models import User
from djblets.testing.decorators import add_fixtures
from djblets.testing.testcases import TestCase
from reviewboard.accounts.models import LocalSiteProfile
from reviewboard.reviews.models import ReviewRequest
class ProfileTests(TestCase):
"""Testing the Profile model."""
fixtures = ['test_users']
def test_is_profile_visible_with_public(self):
"""Testing User.is_profile_public wit
|
h public profiles."""
user1 = User.objects.get(username='admin')
user2 = User.objects.get(username='doc')
self.assertTrue(user1.is_profile_visible(user2))
def test_is_profile_visible_with_private(self):
"""Testing User.is_profile_public with private profiles."""
user1 = User.objects.get(username='admin')
user2 = User.objects.get(
|
username='doc')
profile = user1.get_profile()
profile.is_private = True
profile.save()
self.assertFalse(user1.is_profile_visible(user2))
self.assertTrue(user1.is_profile_visible(user1))
user2.is_staff = True
self.assertTrue(user1.is_profile_visible(user2))
@add_fixtures(['test_reviewrequests', 'test_scmtools', 'test_site'])
def test_is_star_unstar_updating_count_correctly(self):
"""Testing if star, unstar affect review request counts correctly."""
user1 = User.objects.get(username='admin')
profile1 = user1.get_profile()
review_request = ReviewRequest.objects.public()[0]
site_profile = profile1.site_profiles.get(local_site=None)
profile1.star_review_request(review_request)
site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
self.assertTrue(review_request in
profile1.starred_review_requests.all())
self.assertEqual(site_profile.starred_public_request_count, 1)
profile1.unstar_review_request(review_request)
site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
self.assertFalse(review_request in
profile1.starred_review_requests.all())
self.assertEqual(site_profile.starred_public_request_count, 0)
|
yaxu/patternlib
|
pattern/migrations/0015_pattern_json.py
|
Python
|
gpl-3.0
| 444
| 0
|
# -*- coding: utf-8 -*-
#
|
Generated by Django 1.10.4 on 2017-05-30 22:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pattern', '0014_pattern_editnumber'),
]
operations = [
migrations.AddField(
model_name='pattern',
name='json',
|
field=models.TextField(null=True),
),
]
|
michelts/aloe_django
|
tests/integration/django/dill/leaves/features/steps.py
|
Python
|
gpl-3.0
| 1,684
| 0
|
#
import io
import json
import sys
from django.core.management import call_command
from leaves.models import (
Harvester,
Panda,
)
from aloe import after, step
from aloe.tools import guess_types
from aloe_django.steps.models import (
test_existence,
tests_existence,
write_models,
writes_models,
)
from nose.tools import assert_equals
max_rego = 0
@writes_models(Harvester)
def write_with_rego(data, field=None):
for hash_ in data:
hash_['rego'] = hash_['make'][:3].upper() + "001"
write_models(Harvester, data, field=field)
@tests_existence(Harvester)
def check_with_rego(queryset, data):
try:
data['rego'] = data['rego'].upper()
except KeyError:
pass
return test_existence(queryset, data)
@step(r
|
'The database dump is as follows')
def database_dump(step):
if sys.version_info
|
>= (3, 0):
output = io.StringIO()
else:
output = io.BytesIO()
call_command('dumpdata', stdout=output, indent=2)
output = output.getvalue()
assert_equals(json.loads(output), json.loads(step.multiline))
@step(r'I have populated the database')
def database_populated(step):
pass
@step(r'I count the harvesters')
def count_harvesters(step):
print("Harvester count: %d" % Harvester.objects.count())
@writes_models(Panda)
def write_pandas(data, field):
# It is not necessary to call hashes_data/guess_types, but it might be
# present in old code using the library. Test that it is a no-op
# in that case.
data = guess_types(data)
for hash_ in data:
if 'name' in hash_:
hash_['name'] += ' Panda'
return write_models(Panda, data, field)
|
mitsei/dlkit
|
tests/authorization/test_searches.py
|
Python
|
mit
| 5,434
| 0.001472
|
"""Unit tests of authorization searches."""
import pytest
from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only
from dlkit.abstract_osid.osid import errors
from dlkit.primordium.id.primitives import Id
from dlkit.primordium.type.primitives import Type
from dlkit.runtime import PROXY_SESSION, proxy_example
from dlkit.runtime.managers import Runtime
REQUEST = proxy_example.SimpleRequest()
CONDITION = PROXY_SESSION.get_proxy_condition()
CONDITION.set_http_request(REQUEST)
PROXY = PROXY_SESSION.get_proxy(CONDITION)
DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'})
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def authorization_search_class_fixture(request):
# From test_templates/resource.py::ResourceSearch::init_template
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'AUTHORIZATION',
proxy=PROXY,
implementation=request.cls.service_config)
create_form = request.cls.svc_mgr.get_vault_form_for_create([])
create_form.display_name = 'Test catalog'
create_form.description = 'Test catalog description'
request.cls.catalog = request.cls.svc_mgr.create_vault(create_form)
def class_tear_down():
request.cls.svc_mgr.delete_vault(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def authorization_search_test_fixture(request):
# From test_templates/resource.py::ResourceSearch::init_template
request.cls.search = request.cls.catalog.get_authorization_search()
@pytest.mark.usefixtures("authorization_search_class_fixture", "authorization_search_test_fixture")
class TestAuthorizationSearch(object):
"""Tests for Author
|
izationSearch"""
@pytest.mark.skip('unimplemented te
|
st')
def test_search_among_authorizations(self):
"""Tests search_among_authorizations"""
pass
@pytest.mark.skip('unimplemented test')
def test_order_authorization_results(self):
"""Tests order_authorization_results"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_authorization_search_record(self):
"""Tests get_authorization_search_record"""
pass
@pytest.mark.usefixtures("authorization_search_results_class_fixture", "authorization_search_results_test_fixture")
class TestAuthorizationSearchResults(object):
"""Tests for AuthorizationSearchResults"""
@pytest.mark.skip('unimplemented test')
def test_get_authorizations(self):
"""Tests get_authorizations"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_authorization_query_inspector(self):
"""Tests get_authorization_query_inspector"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_authorization_search_results_record(self):
"""Tests get_authorization_search_results_record"""
pass
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def vault_search_class_fixture(request):
# From test_templates/resource.py::ResourceSearch::init_template
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'AUTHORIZATION',
proxy=PROXY,
implementation=request.cls.service_config)
create_form = request.cls.svc_mgr.get_vault_form_for_create([])
create_form.display_name = 'Test catalog'
create_form.description = 'Test catalog description'
request.cls.catalog = request.cls.svc_mgr.create_vault(create_form)
def class_tear_down():
request.cls.svc_mgr.delete_vault(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def vault_search_test_fixture(request):
# From test_templates/resource.py::ResourceSearch::init_template
request.cls.search = request.cls.catalog.get_vault_search()
@pytest.mark.usefixtures("vault_search_class_fixture", "vault_search_test_fixture")
class TestVaultSearch(object):
"""Tests for VaultSearch"""
@pytest.mark.skip('unimplemented test')
def test_search_among_vaults(self):
"""Tests search_among_vaults"""
pass
@pytest.mark.skip('unimplemented test')
def test_order_vault_results(self):
"""Tests order_vault_results"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_vault_search_record(self):
"""Tests get_vault_search_record"""
pass
@pytest.mark.usefixtures("vault_search_results_class_fixture", "vault_search_results_test_fixture")
class TestVaultSearchResults(object):
"""Tests for VaultSearchResults"""
@pytest.mark.skip('unimplemented test')
def test_get_vaults(self):
"""Tests get_vaults"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_vault_query_inspector(self):
"""Tests get_vault_query_inspector"""
pass
@pytest.mark.skip('unimplemented test')
def test_get_vault_search_results_record(self):
"""Tests get_vault_search_results_record"""
pass
|
pdeesawat/PSIT58_test_01
|
Test_Python_code/last/02_Indonesia/total_death_indonesia.py
|
Python
|
apache-2.0
| 1,399
| 0.010722
|
import plotly.plotly as py
import plotly.graph_objs as go
data = open('Real_Final_database_02.csv')
alldata = data.readlines()
listdata = []
for i in alldata:
listdata.append(i.strip().split(','))
type_z = ['Flood', 'Epidemic', 'Drought', 'Earthquake', 'Storm']
size_fill = [15,20,25,30,35]
fill_colors = ['#00d0f5', '#ff4a2e', 'a36800', '#ad9900', '#8b00db']
trace = []
for i in range(5):
year_x = []
death_z = []
types_y = []
for j in listdata:
if j[0] == 'Indonesia' and j[2] == type_z[i]:
year_x.append(int(j[1]))
death_z.append(int(j[5]))
types_y.append(type_z[i])
trace.append(go.Scatter(
x=year_x,
y=death_z,
name=type_z[i],
mode='markers',
marker=dict(
color = [fill_colors[i] for k in death_z],
size=[size_fill[i] for k in
|
death_z]
)
)
)
data = trace
layout = go.Layout(
title='Total Death In Indonesia',
showlegend=True,
height=600,
width=600,
xaxis=dict(
# set x-axis' labels direction at 45 degree angle
tickangle=-45,
),
yaxis=dict(
title="Total Death",
titlefont=dict(
color='#ff2323'
),
tickfont=dict(
color='#ff2323'
)
),
)
fig = go.Figure(data=data, l
|
ayout=layout)
plot_url = py.plot(fig, filename='Total_Death_in_Indonesia')
|
balanced/balanced-python
|
scenarios/debit_update/executable.py
|
Python
|
mit
| 284
| 0.003521
|
import balanced
balanced.configure('ak-test-2eKlj1ZDfAcZSARMf3NMhBHywDej0avSY')
debit = balanced.Debit.fetch('/debits/WD5EW7vbyXlTsudIGF5AkrEA')
debit.description =
|
'New description for d
|
ebit'
debit.meta = {
'facebook.id': '1234567890',
'anykey': 'valuegoeshere',
}
debit.save()
|
Aeronautics/aero
|
aero/commands/install.py
|
Python
|
bsd-3-clause
| 1,362
| 0.002937
|
# -*- coding: utf-8 -*-
from aero.__version__ import __version_info__
__author__ = 'nickl-'
from .base import CommandProcessor as CommandProcessor
class InstallCommand(CommandProcessor):
from .base import coroutine
package = ''
adapter = ''
def wiring(self):
self.out = self.write()
self.ticker.routine(self.progress(None))
return self.each(self.spacing(self.call(self.res()))
|
)
def seen(self, command, adapter, package, result=False):
self.package = package
self.adapter = adapter
return result
@coroutine
def res(self):
while True:
res = (yield)
if res[1] == 0:
print 'Successfully installed package: {} wit
|
h {}'.format(self.package, self.adapter)
else:
print 'Aborted: Error while installing package: {} {} returned exit code {}'.format(
self.package, self.adapter, res[1]
)
@coroutine
def write(self):
import sys
out = sys.stdout
while True:
text = (yield)
out.write(text)
@coroutine
def spacing(self, target):
while True:
payload = (yield)
print u'\n'
target.send(payload)
@coroutine
def progress(self, responder):
while True: (yield)
|
Fly-Style/metaprog_univ
|
Lab1/polls/migrations/0004_auto_20161201_1743.py
|
Python
|
mit
| 487
| 0.002053
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-01 17:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migra
|
tion(migrations.Migration):
dependencies = [
('polls',
|
'0003_bettoride_success'),
]
operations = [
migrations.AlterField(
model_name='bet',
name='isBetSuccessful',
field=models.NullBooleanField(default=None, verbose_name='BetSuccess'),
),
]
|
jwhitlock/web-platform-compat
|
webplatformcompat/tests/test_cache.py
|
Python
|
mpl-2.0
| 27,278
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `web-platform-compat` fields module."""
from datetime import datetime
from pytz import UTC
from django.contrib.auth.models import User
from django.test.utils import override_settings
from webplatformcompat.cache import Cache
from webplatformcompat.history import Changeset
from webplatformcompat.models import (
Browser, Feature, Maturity, Reference, Section, Specification, Support,
Version)
from .base import TestCase
class TestCache(TestCase):
def setUp(self):
self.cache = Cache()
self.login_user(groups=['change-resource'])
def test_browser_v1_serializer(self):
browser = self.create(Browser)
out = self.cache.browser_v1_serializer(browser)
expected = {
'id': browser.id,
'slug': u'',
'name': {},
'note': {},
'history:PKList': {
'app': u'webplatformcompat',
'model': 'historicalbrowser',
'pks': [browser.history.all()[0].pk],
},
'history_current:PK': {
'app': u'webplatformcompat',
'model': 'historicalbrowser',
'pk': browser.history.all()[0].pk,
},
'versions:PKList': {
'app': u'webplatformcompat',
'model': 'version',
'pks': [],
},
}
self.assertEqual(out, expected)
def test_browser_v1_serializer_empty(self):
self.assertEqual(None, self.cache.browser_v1_serializer(None))
def test_browser_v1_loader(self):
browser = self.create(Browser)
with self.assertNumQueries(3):
obj = self.cache.browser_v1_loader(browser.pk)
with self.assertNumQueries(0):
serialized = self.cache.browser_v1_serializer(obj)
self.assertTrue(serialized)
def test_browser_v1_loader_not_exist(self):
self.assertFalse(Browser.objects.filter(pk=666).exists())
self.assertIsNone(self.cache.browser_v1_loader(666))
def test_browser_v1_invalidator(self):
browser = self.create(Browser)
self.assertEqual([], self.cache.browser_v1_invalidator(browser))
def test_changeset_v1_serializer(self):
created = datetime(2014, 10, 29, 8, 57, 21, 806744, UTC)
changeset = self.create(Changeset, user=self.user)
Changeset.objects.filter(pk=changeset.pk).update(
created=created, modified=created)
changeset = Changeset.objects.get(pk=changeset.pk)
out = self.cache.changeset_v1_serializer(changeset)
expected = {
'id': changeset.id,
'created:DateTime': '1414573041.806744',
'modified:DateTime': '1414573041.806744',
'target_resource_type': '',
'target_resource_id': 0,
'closed': False,
'user:PK': {
'app': u'auth',
'model': 'user',
'pk': self.user.pk,
},
'historical_browsers:PKList': {
'app': u'webplatformcompat',
'model': 'historicalbrowser',
'pks': []
},
'historical_features:PKList': {
'app': u'webplatformcompat',
'model': 'historicalfeature',
'pks': []
},
'historical_maturities:PKList': {
'app': u'webplatformcompat',
'model': 'historicalmaturity',
'pks': []
},
'historical_references:PKList': {
'app': u'webplatformcompat',
'model': 'historicalreference',
'pks': []
},
'historical_sections:PKList': {
'app': u'webplatformcompat',
'model': 'historicalsection',
'pks': []
},
'historical_specifications:PKList': {
'app': u'webplatformcompat',
'model': 'historicalspecification',
'pks': []
},
'historical_supports:PKList': {
'app': u'webplatformcompat',
'model': 'historicalsupport',
'pks': []
},
'historical_versions:PKList': {
'app': u'webplatformcompat',
'model': 'historicalversion',
'pks': []
},
}
self.assertEqual(out, expected)
def test_changeset_v1_serializer_empty(self):
self.assertEqual(None, self.cache.changeset_v1_serializer(None))
def test_changeset_v1_loader(self):
changeset = self.create(Changeset, user=self.user)
with self.assertNumQueries(9):
obj = self.cache.changeset_v1_loader(changeset.pk)
with self.assertNumQueries(0):
serialized = self.cache.changeset_v1_serializer(obj)
self.assertTrue(serialized)
def test_changeset_v1_loader_not_exist(self):
self.assertFalse(Changeset.objects.filter(pk=666).exists())
self.assertIsNone(self.cache.changeset_v1_loader(666))
def test_changeset_v1_invalidator(self):
changeset = self.create(Changeset, user=self.user)
self.assertEqual([], self.cache.changeset_v1_invalidator(changeset))
def test_feature_v1_serializer(self):
feature = self.create(
Feature, slug='the-slug', name='{"en": "A Name"}')
out = self.cache.feature_v1_serializer(feature)
expected = {
'id': feature.id,
'slug': 'the-slug',
'mdn_uri': {},
'experimental': False,
'standardized': True,
'stable': True,
'obsolete': False,
'name': {'en': 'A Name'},
'descendant_count': 0,
'references:PKList': {
'app': 'webplatformcompat',
'model': 'reference',
'pks': [],
},
'supports:PKList': {
'app': 'webplatformcompat',
'model': 'support',
'pks': [],
},
'parent:PK': {
'app': 'webplatformcompat',
'model': 'feature',
'pk': None,
},
'children:PKList': {
'app': 'webplatformcompat',
'model': 'feature',
'pks': [],
},
'row_children:PKList': {
'app': 'webplatformcompat',
'model': 'feature',
'pks': [],
},
'row_children_pks': [],
'page_children_pks': [],
'descendant_pks': [],
'row_descendant_pks': [],
'history:PKList': {
'app': 'webplatformcompat',
'model': 'historicalfeature',
'pks': [feature.history.all()[0].pk],
},
'history_current:PK': {
'app': 'webplatformcompat',
'model': 'historicalfeature',
'pk': feature.history.all()[0].pk,
},
}
self.assertEqual(out, expected)
def test_feature_v1_serializer_mixed_descendants(self):
feature = self.create(
Feature, slug='the-slug', name='{"en": "A Name"}')
child1 = self.create(Feature, slug='child1', parent=feature)
child2 = self.create(Feature, slug='child2', parent=feature)
child21 = self.create(Feature, slug='child2.1', parent=child2)
page1 = self.create(
Feature, slug='page1', parent=feature,
|
mdn_uri='{"en": "https://example.com/page1"}')
page2 = self.create(
Feature, slug='page2', parent=child2,
mdn_uri='{"en": "https://example.com/page2"}')
feature = Feature.objects.get(id=feature
|
.id)
out = self.cache.feature_v1_serializer(feature)
self.assertEqual(out['descendant_count'], 5)
self.assertEqual(
out['descendant_pks'],
[child1.pk, child2.pk, child21.pk, page2.pk, page1.pk])
self.assertEqual(
out
|
DNFcode/edx-platform
|
common/djangoapps/student/views.py
|
Python
|
agpl-3.0
| 91,790
| 0.002528
|
"""
Student Views
"""
import datetime
import logging
import re
import uuid
import time
import json
from collections import defaultdict
from pytz import UTC
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.core.validators import validate_email, validate_slug, ValidationError
from django.db import IntegrityError, transaction
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseForbidden,
Http404)
from django.shortcuts import redirect
from django.utils.translation import ungettext
from django_future.csrf import ensure_csrf_cookie
from django.utils.http import cookie_date, base36_to_int
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.response import TemplateResponse
from ratelimitbackend.exceptions import RateLimitException
from requests import HTTPError
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from edxmako.shortcuts import render_to_response, render_to_string
from mako.exceptions import TopLevelLookupException
from course_modes.models import CourseMode
from student.models import (
Registration, UserProfile, PendingNameChange,
PendingEmailChange, CourseEnrollment, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration)
from student.forms import PasswordResetFormNoActive
from verify_student.models import SoftwareSecurePhotoVerification, MidcourseReverificationWindow
from certificates.models import CertificateStatuses, certificate_status_for_student
from dark_lang.models import DarkLangConfig
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore import ModuleStoreEnum
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement
from courseware.access import has_access
from django_comment_common.models import Role
from external_auth.models import ExternalAuthMap
import external_auth.views
from bulk_email.models import Optout, CourseAuthorization
import shoppingcart
from shoppingcart.models import DonationConfiguration
from openedx.core.djangoapps.user_api.models import UserPreference
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import commit_on_success_with_read_committed
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from microsite_configuration import microsite
from util.password_policy_validators import (
validate_password_length, validate_password_complexity,
validate_password_dictionary
)
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
auth_pipeline_urls, set_logged_in_cookie,
check_verify_status_by_course
)
from xmodule.error_module import ErrorDescriptor
from shoppingcart.models import CourseRegistrationCode
from openedx.core.djangoapps.user_api.api import profile as profile_api
import analytics
from eventtracking import tracker
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint:
|
disable=invalid-name
def csrf_token(context):
"""A csrf token that can be included in a form."""
token = context.get('csrf_token', '')
if token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="%s" /></div>' % (token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is ca
|
ched for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
"""
Render the edX main page.
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
"""
if extra_context is None:
extra_context = {}
# The course selection work is done in courseware.courses.
domain = settings.FEATURES.get('FORCE_UNIVERSITY_DOMAIN') # normally False
# do explicit check, because domain=None is valid
if domain is False:
domain = request.META.get('HTTP_HOST')
courses = get_courses(user, domain=domain)
courses = sort_by_announcement(courses)
context = {'courses': courses}
context.update(extra_context)
return render_to_response('index.html', context)
def embargo(_request):
"""
Render the embargo page.
Explains to the user why they are not able to access a particular embargoed course.
Tries to use the themed version, but fall back to the default if not found.
"""
try:
if settings.FEATURES["USE_CUSTOM_THEME"]:
return render_to_response("static_templates/theme-embargo.html")
except TopLevelLookupException:
pass
return render_to_response("static_templates/embargo.html")
def process_survey_link(survey_link, user):
"""
If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
Currently, this is sha1(user.username). Otherwise, return survey_link.
"""
return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
def cert_info(user, course):
"""
Get the certificate info needed to render the dashboard section for the given
student and course. Returns a dictionary with keys:
'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
'show_download_url': bool
'download_url': url, only present if show_download_url is True
'show_disabled_download_button': bool -- true if state is 'generating'
'show_survey_button': bool
'survey_url': url, only if show_survey_button is True
'grade': if status is not 'processing'
"""
if not course.may_certify():
return {}
return _cert_info(user, course, certificate_status_for_student(user, course.id))
def reverification_info(course_enrollment_pairs, user, statuses):
"""
Returns reverification-related information for *all* of user's enrollments whose
reverification status is in status_list
Args:
course_enrollment_pairs (list): list of (course, enrollment) tuples
user (User): the user whose information we want
statuses (list): a list of reverification statuses we want information for
example: ["must_reverify", "denied"]
Returns:
dictionary of lists: dictionary with one key per status, e.g.
dict["must_reverify"] = []
dict["must_reverify"] = [some information]
"""
reverifications = defaultdict(list)
for (course, enrollment) in course_enrollment_pairs:
info = single_course_reverification_info(user, course, enrollment)
if info:
reverifications[info.status].append(info)
# Sort the data by the reverification_end_date
for status in statuses:
if reverifications[status]:
reverifications[status].sort(key=lambda x: x.date)
return reverifications
def single_course_reverification_info(user, course, enrollment): # pylint: di
|
ksmaheshkumar/grr
|
client/client_actions/tempfiles_test.py
|
Python
|
apache-2.0
| 6,106
| 0.003439
|
#!/usr/bin/env python
"""Tests for grr.client.client_actions.tempfiles."""
import os
import tempfile
import time
from grr.client.client_actions import tempfiles
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
class GRRTempFileTestDirectory(test_lib.GRRBaseTest):
"""Tests for GRR temp file utils when directory is provided."""
def setUp(self):
"""Create fake filesystem."""
super(GRRTempFileTestDirectory, self).setUp()
self.prefix = config_lib.CONFIG.Get("Client.tempfile_prefix")
self.existsdir = os.path.join(self.temp_dir, "this/exists/")
os.makedirs(self.existsdir)
self.not_exists = os.path.join(self.temp_dir, "does/not/exist/")
self.new_temp_file = os.path.join(self.not_exists, self.prefix)
def _CheckPermissions(self, filename, expected):
# Just look at the last 3 octets.
file_mode = os.stat(filename).st_mode & 0777
self.assertEqual(file_mode, expected)
def testCreateGRRTempFile(self):
fd = tempfiles.CreateGRRTempFile(self.not_exists, suffix=".exe")
self.assertTrue(fd.name.startswith(self.new_temp_file))
self.assertTrue(fd.name.endswith(".exe"))
self.assertTrue(os.path.exists(fd.name))
self._CheckPermissions(fd.name, 0700)
self._CheckPermissions(os.path.dirname(fd.name), 0700)
def testCreateGRRTempFileRelativePath(self):
self.assertRaises(tempfiles.ErrorBadPath,
tempfiles.CreateGRRTempFile, "../../blah")
def testCreateGRRTempFileWithLifetime(self):
fd = tempfiles.CreateGRRTempFile(self.not_exists, lifetime=0.1)
self.assertTrue(os.path.exists(fd.name))
time.sleep(1)
self.assertFalse(os.path.exists(fd.name))
def testDeleteGRRTempFile(self):
grr_tempfile = os.path.join(self.existsdir, self.prefix)
open(grr_tempfile, "w").write("something")
tempfiles.DeleteGRRTempFile(grr_tempfile)
self.assertFalse(os.path.exists(grr_tempfile))
def testDeleteGRRTempFileBadPrefix(self):
self.assertRaises(tempfiles.ErrorNotTempFile,
tempfiles.DeleteGRRTempFile,
os.path.join(self.existsdir, "/blah"))
def testDeleteGRRTempFileRelativePath(self):
self.assertRaises(tempfiles.ErrorBadPath,
tempfiles.DeleteGRRTempFile, "../../blah")
class GRRTempFileTestFilename(test_lib.GRRBaseTest):
"""Tests for GRR temp file utils when filename is provided."""
def setUp(self):
"""Create fake filesystem."""
super(GRRTempFileTestFilename, self).setUp()
# This is where temp files go if a directory is not provided.
# For this test it has to be different from the temp firectory
# so we create a new one.
self.old_client_tempdir = config_lib.CONFIG.Get("Client.tempdir")
self.client_tempdir = tempfile.mkdtemp(
dir=config_lib.CONFIG.Get("Client.tempdir"))
config_lib.CONFIG.Set("Client.tempdir", self.client_tempdir)
def tearDown(self):
os.rmdir(config_lib.CONFIG.Get("Client.tempdir"))
config_lib.CONFIG.Set("Client.tempdir", self.old_client_tempdir)
def testCreateAndDelete(self):
fd = tempfiles.CreateGRR
|
TempFile(filename="process.42.exe", mode="wb")
fd.close()
self.assertTrue(os.path.exists(fd.name))
self.assertTrue(os.path.basen
|
ame(fd.name) == "process.42.exe")
tempfiles.DeleteGRRTempFile(fd.name)
self.assertFalse(os.path.exists(fd.name))
fd = open(os.path.join(self.temp_dir, "notatmpfile"), "w")
fd.write("something")
fd.close()
self.assertTrue(os.path.exists(fd.name))
self.assertRaises(tempfiles.ErrorNotTempFile,
tempfiles.DeleteGRRTempFile,
fd.name)
self.assertTrue(os.path.exists(fd.name))
class DeleteGRRTempFiles(test_lib.EmptyActionTest):
"""Test DeleteGRRTempFiles client action."""
def setUp(self):
super(DeleteGRRTempFiles, self).setUp()
filename = "%s_blah" % config_lib.CONFIG["Client.tempfile_prefix"]
self.tempfile = utils.JoinPath(self.temp_dir,
"delete_test", filename)
self.dirname = os.path.dirname(self.tempfile)
os.makedirs(self.dirname)
config_lib.CONFIG.Set("Client.tempdir", self.dirname)
self.not_tempfile = os.path.join(self.temp_dir, "notatempfile")
open(self.not_tempfile, "w").write("something")
self.temp_fd = tempfiles.CreateGRRTempFile(self.dirname)
self.temp_fd2 = tempfiles.CreateGRRTempFile(self.dirname)
self.assertTrue(os.path.exists(self.not_tempfile))
self.assertTrue(os.path.exists(self.temp_fd.name))
self.assertTrue(os.path.exists(self.temp_fd2.name))
self.pathspec = rdfvalue.PathSpec(
path=self.dirname, pathtype=rdfvalue.PathSpec.PathType.OS)
def testDeleteGRRTempFilesInDirectory(self):
result = self.RunAction("DeleteGRRTempFiles",
self.pathspec)[0]
self.assertTrue(os.path.exists(self.not_tempfile))
self.assertFalse(os.path.exists(self.temp_fd.name))
self.assertFalse(os.path.exists(self.temp_fd2.name))
self.assertTrue(self.temp_fd.name in result.data)
self.assertTrue(self.temp_fd2.name in result.data)
def testDeleteGRRTempFilesSpecificPath(self):
self.pathspec = rdfvalue.PathSpec(
path=self.temp_fd.name, pathtype=rdfvalue.PathSpec.PathType.OS)
result = self.RunAction("DeleteGRRTempFiles",
self.pathspec)[0]
self.assertTrue(os.path.exists(self.not_tempfile))
self.assertFalse(os.path.exists(self.temp_fd.name))
self.assertTrue(os.path.exists(self.temp_fd2.name))
self.assertTrue(self.temp_fd.name in result.data)
self.assertFalse(self.temp_fd2.name in result.data)
def testDeleteGRRTempFilesPathDoesNotExist(self):
self.pathspec = rdfvalue.PathSpec(
path="/does/not/exist", pathtype=rdfvalue.PathSpec.PathType.OS)
self.assertRaises(tempfiles.ErrorBadPath,
self.RunAction, "DeleteGRRTempFiles", self.pathspec)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
trmccart/py-jnprwlc
|
examples/maker.py
|
Python
|
apache-2.0
| 369
| 0.00813
|
import demowlcutils
from demowlcutils import ppxml, WLC_login
from
|
pprint import pprint as pp
from jnpr.wlc import WirelessLanController as WLC
wlc = WLC(host='a', user='b', password='c')
r = wlc.RpcMaker( target='vlan', name='J
|
eremy')
# you can access the following attributes, refer to the jnpr.wlc.builder
# file for more details
# r.cmd
# r.target
# r.args
|
uranusjr/django-crispy-forms-ng
|
crispy_forms/tests/urls.py
|
Python
|
mit
| 292
| 0
|
import django
if
|
django.VERSION >= (1, 5):
from django.conf.urls import patterns, url
else:
from django.conf.urls.defaults import patterns, url
def simpleAction(request):
pass
urlpatterns = patterns(
'',
url(r'^simple/action/$', simpleAction, n
|
ame='simpleAction'),
)
|
fabio-otsuka/invesalius3
|
invesalius/gui/task_tools.py
|
Python
|
gpl-2.0
| 5,584
| 0.008596
|
#--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------------
import wx
import os
import wx.lib.embeddedimage as emb
import wx.lib.hyperlink as hl
import wx.lib.platebtn as pbtn
from wx.lib.pubsub import pub as Publisher
import invesalius.constants as constants
import invesalius.constants as const
ID_BTN_MEASURE_LINEAR = wx.NewId()
ID_BTN_MEASURE_ANGULAR = wx.NewId()
ID_BTN_ANNOTATION = wx.NewId()
class TaskPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
inner_panel = InnerTaskPanel(self)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(inner_panel, 1, wx.EXPAND | wx.GROW | wx.BOTTOM | wx.RIGHT |
wx.LEFT, 7)
sizer.Fit(self)
self.SetSizer(sizer)
self.Update()
self.SetAutoLayout(1)
class InnerTaskPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.SetBackgroundColour(wx.Colour(255,255,255))
self.SetAutoLayout(1)
# Counter for projects loaded in current GUI
self.proj_count = 0
# Floating items (to be inserted)
self.float_hyper_list = []
# Fixed text and hyperlink items
tooltip = wx.ToolTip(_("Measure distances"))
txt_measure = wx.StaticText(self, -1, _("Measure"))
txt_measure.SetToolTip(tooltip)
tooltip = wx.ToolTip(_("Add text annotations"))
txt_annotation = hl.HyperLinkCtrl(self, -1,_("Add text annotations"))
txt_annotation.SetUnderlines(False, False, False)
txt_annotation.SetColours("BLACK", "BLACK", "BLACK")
txt_annotation.SetToolTip(tooltip)
txt_annotation.AutoBrowse(False)
txt_annotation.UpdateLink()
txt_annotation.Bind(hl.EVT_HYPERLINK_LEFT, self.OnTextAnnotation)
# Image(s) for buttons
BMP_ANNOTATE = wx.Bitmap(os.path.join(const.ICON_DIR, "annotation.png"), wx.BITMAP_TYPE_PNG)
BMP_ANGLE = wx.Bitmap(os.path.join(const.ICON_DIR, "measure_angle.jpg"), wx.BITMAP_TYPE_JPEG)
BMP_DISTANCE = wx.Bitmap(os.path.join(const.ICON_DIR, "measure_line.png"), wx.BITMAP_TYPE_PNG)
BMP_ANNOTATE.SetWidth(25)
BMP_ANNOTATE.SetHeight(25)
BMP_ANGLE.SetWidth(25)
BMP_ANGLE.SetHeight(25)
BMP_DISTANCE.SetWidth(25)
BMP_DISTANCE.SetHeight(25)
# Buttons related to hyperlinks
button_style = pbtn.PB_STYLE_SQUARE | pbtn.PB_STYLE_DEFAULT
button_measure_linear = pbtn.PlateButton(self, ID_BTN_MEASURE_LINEAR, "",
BMP_DISTANCE, style=button_style)
button_measure_angular = pbtn.PlateButton(self, ID_BTN_MEASURE_ANGULAR, "",
BMP_ANGLE, style=button_style)
button_annotation = pbtn.PlateButton(self, ID_BTN_ANNOTATION, "",
BMP_ANNOTATE, style=button_style)
# When using PlaneButton, it is necessary to bind events from parent win
self.Bind(wx.EVT_BUTTON, self.OnButton)
# Tags and grid sizer for fixed items
flag_link = wx.EXPAND|wx.GROW|wx.LEFT|wx.TOP
flag_button = wx.EXPAND | wx.GROW
sizer = wx.GridBagSizer(hgap=0, vgap=0)
sizer.Add(txt_measure,pos=(0,0),flag=wx.GROW|wx.EXPAND|wx.TOP,border=3)
sizer.Add(button_measure_linear,pos=(0,1),flag=wx.GROW|wx.EXPAND)
sizer.Add(button_measure_angular,pos=(0,2),flag=wx.GROW|wx.EXPAND)
sizer.Add(txt_annotation, pos=(1,0),flag=wx.GROW|wx.EXPAND)
sizer.Add(button_annotation, pos=(1,2),span=(2,1), flag=wx.GROW|wx.EXPAND)
sizer.AddGrowableCol(0)
# Add line sizers into main sizer
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(sizer, 0, wx.GROW|wx.EXPAND)
main_sizer.Fit(self)
# Update main sizer and panel layout
self.SetSizer(sizer)
self.Fit()
self.sizer = main_sizer
def OnTextAnnotation(self, evt=None):
print "TODO: Send Signal - Add text annotation (both 2d and 3d)"
def OnLinkLinearMeasure(self):
Publisher.sendMessage('Enable style',
constants.STATE_MEASURE_DISTANCE)
|
def OnLinkAngularMeasure(self):
Publisher.sendMessage('Enable style',
constants.STATE_MEASURE_ANGLE)
def OnButton(self, evt):
id = evt.GetId()
if id == ID_BTN_MEASURE_LINEAR:
self.OnLink
|
LinearMeasure()
elif id == ID_BTN_MEASURE_ANGULAR:
self.OnLinkAngularMeasure()
else: # elif id == ID_BTN_ANNOTATION:
self.OnTextAnnotation()
|
mjames-upc/python-awips
|
dynamicserialize/dstypes/com/raytheon/uf/common/message/Header.py
|
Python
|
bsd-3-clause
| 611
| 0.003273
|
##
##
# File auto-generated against equivalent DynamicSerialize Java clas
|
s
from .Property import Property
class
|
Header(object):
def __init__(self, properties=None, multimap=None):
if properties is None:
self.properties = []
else:
self.properties = properties
if multimap is not None:
for k, l in multimap.items():
for v in l:
self.properties.append(Property(k, v))
def getProperties(self):
return self.properties
def setProperties(self, properties):
self.properties = properties
|
yedivanseven/bestPy
|
tests/algorithms/similarities/test_similarities.py
|
Python
|
gpl-3.0
| 2,753
| 0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest as ut
import numpy as np
import scipy.spatial.distance as spd
from .mock_data import Data
|
from ....algorithms.similarities import cosine_binary, cosine, dice, jac
|
card
from ....algorithms.similarities import kulsinski, russellrao, sokalsneath
MATRIX = np.array([[1, 0, 3, 5, 0, 2],
[0, 1, 2, 0, 4, 1],
[3, 4, 0, 0, 1, 1],
[5, 0, 1, 2, 3, 0],
[2, 0, 4, 2, 0, 0],
[0, 7, 0, 1, 2, 5],
[4, 2, 5, 3, 5, 4]])
BOOL_MATRIX = MATRIX.astype(bool).astype(float)
class TestSimilarities(ut.TestCase):
def setUp(self):
self.data = Data(MATRIX)
def test_cosine(self):
should_be = spd.squareform(spd.pdist(MATRIX.T, spd.cosine))
actually_is = (1 - cosine(self.data).toarray())
self.assertTrue(np.allclose(should_be, actually_is))
def test_cosine_binary(self):
should_be = spd.squareform(spd.pdist(BOOL_MATRIX.T, spd.cosine))
actually_is = (1 - cosine_binary(self.data).toarray())
self.assertTrue(np.allclose(should_be, actually_is))
def test_dice(self):
should_be = spd.squareform(spd.pdist(BOOL_MATRIX.T, spd.dice))
actually_is = (1 - dice(self.data).toarray())
self.assertTrue(np.allclose(should_be, actually_is))
def test_jaccard(self):
should_be = spd.squareform(spd.pdist(BOOL_MATRIX.T, spd.jaccard))
actually_is = (1 - jaccard(self.data).toarray())
self.assertTrue(np.allclose(should_be, actually_is))
def test_kulsinski(self):
n_items = MATRIX.shape[1]
should_be = np.zeros((n_items, n_items))
for i in range(n_items):
for j in range(n_items):
should_be[i, j] = spd.kulsinski(BOOL_MATRIX.T[i],
BOOL_MATRIX.T[j])
actually_is = (1 - kulsinski(self.data).toarray())
self.assertTrue(np.allclose(should_be, actually_is))
def test_russellrao(self):
n_items = MATRIX.shape[1]
should_be = np.zeros((n_items, n_items))
for i in range(n_items):
for j in range(n_items):
should_be[i, j] = spd.russellrao(BOOL_MATRIX.T[i],
BOOL_MATRIX.T[j])
actually_is = (1 - russellrao(self.data).toarray())
self.assertTrue(np.allclose(should_be, actually_is))
def test_sokalsneath(self):
should_be = spd.squareform(spd.pdist(BOOL_MATRIX.T, spd.sokalsneath))
actually_is = (1 - sokalsneath(self.data).toarray())
self.assertTrue(np.allclose(should_be, actually_is))
if __name__ == '__main__':
ut.main()
|
nmz787/microfluidic-cad
|
implicitCAD/process_all_escad.py
|
Python
|
gpl-2.0
| 1,723
| 0.007545
|
import subprocess
import os
# setup the path to extopenscad (aka implicitCAD)
cad_bin=os.path.expanduser('~/.cabal/bin/extopenscad')
# set the current input dir to this script file's dir
inp_dir = '.'
# set the output to a directory next to this script file
out_dir = './output'
# set the output file extension to .stl
out_type='.stl'
# make any paths relative (this probably needs work)
out_dir=os.path.abspath(out_dir)
if not os.path.isdir(inp_dir):
inp_dir=os.path.abspath(os.path.join(
os.path.dirname(__file__), inp_dir
)
)
# the -o option tells extopenscad to ouput a file to the subsequent path
o='-o'
try:
os.makedirs(out_dir)
except OSError as e:
# don't complain if the dir already exists
if e.errno!=17:
raise e
|
# go through each item in the input dir
for f in os.listdir(inp_dir):
# if the item is a .escad file, process
if '.escad' in f:
# remove .escad from the filename
|
n=f[0:f.index('.escad')]
# join the output dir with the stripped file and the output filetype
out= os.path.join(out_dir,
n + out_type
)
# join the input directory and the current dir list item
inp=os.path.join(inp_dir, f)
# emit what we came up with for a command
print cad_bin, o, out, inp
# run the command
p=subprocess.Popen([cad_bin, o, out, inp],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# wait for the command's output
s, e=p.communicate()
# print the command's output
print s
print e
|
gmenegoz/pycraft
|
projects/test.py
|
Python
|
gpl-3.0
| 1,511
| 0.001324
|
from pycraft_minetest import *
pos = where()
chat(pos)
maze("maze1.csv")
t = turtle(obsidian)
t.forward(10)
move(3, 10, 5)
chat(where())
sphere(ice, y=-20)
circle([wool, 5], direction="horizontal")
line(gold, 0, 0, 0, 0, 50, 0)
block(iron, y=3)
blocks(wood, x=5, y=6, z=10)
size = readnumber("tell the size...")
cube(redstone, size)
text =
|
readstring("say something...")
chat("I said: " + text)
pyramid(sandstone)
polygon(obsidian, 12, 30)
chat("Hello Minecraf
|
t!")
color = 0
uga = turtle([wool, color])
while True:
for i in range(18):
uga.forward(5)
uga.up(20)
uga.up(30)
color += 1
uga.penblock([wool, color % 12])
# GOLD in ICE
# while True:
# if over(ice):
# chat("ice")
# block(gold, y=-1)
# if near(gold):
# chat("gold nearby!")
# TURTLE LOOP
# uga = turtle(redstone)
# passi = 2
# while True:
# uga.forward(passi)
# uga.left(90)
# passi = passi + 2
# uga = turtle(redstone)
# bea = turtle(beacon)
# bea.setposition(0, 1, 0)
# # col = turtle(beacon)
#
# while True:
# uga.forward(1)
# bea.forward(1)
# uga = turtle(redstone)
# bea = turtle(powered_rail)
# bea.setposition(0, 1, 0)
# # col = turtle(beacon)
#
# while True:
# uga.forward(2)
# bea.forward(1)
# ANIMATE CUBE
# x = pos.x
# y = pos.y
# z = pos.z
# while True:
# cube(ice, 5, x, y, z, absolute=True)
# move(x-5, y+1, z+2, absolute=True)
# time.sleep(0.1)
# cube(air, 5, x, y, z, absolute=True)
# x += 1
|
apehua/pilas
|
pilasengine/fondos/color.py
|
Python
|
lgpl-3.0
| 498
| 0
|
# pila
|
s engine: un motor para hacer videojuegos
#
# Copyright 2010-2014 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
from pilasengine.fondos import fondo
class Color(fondo.Fondo):
def __init__(self, pilas, color):
fondo.Fondo.__init__(self, pilas)
(ancho, alto) = self.pilas.obtener_area()
self.imagen = self.pilas.imagenes.crear_supe
|
rficie(ancho, alto)
self.imagen.pintar(color)
|
szha/mxnet
|
benchmark/python/sparse/updater.py
|
Python
|
apache-2.0
| 2,695
| 0.001484
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import mxnet as mx
from mxnet.ndarray.sparse import adam_update
import numpy as np
import argparse
mx.random.seed(0)
np.random.seed(0)
parser = argparse.ArgumentParser(description='Benchmark adam updater')
parser.add_argument('--dim-in', type=int, default=240000, help='weight.shape[0]')
parser.add_argument('--dim-out', type=int, default=512, help='weight.shape[1]')
parser.add_argument('--nnr', type=int, default=5000, help='grad.indices.shape[0]')
parser.add_argument('--repeat', type=int, default=1000, help='num repeat')
parser.add_argument('--dense-grad', action='store_true',
help='if set to true, both gradient and weight are dense.')
parser.add_argument('--dense-state', action='store_true',
help='if set to true, states are dense, indicating standard update')
parser.add_argument('--cpu', action='store_true')
args = parser.parse_args()
dim_in = args.dim_in
dim_out = args.dim_out
nnr = args.nnr
ctx = mx.cpu() if args.cpu els
|
e mx.gpu()
ones = mx.nd.ones((dim_in, dim_out), ctx=ctx)
if not args.dense_grad:
weight = ones.tostype('row_sparse')
indices = np.arange(dim_in)
np.random.shuffle(indices)
indices = np.unique(indices[:nnr])
indices = mx.nd.array(indices, ctx=ctx)
grad = mx.nd.sparse.retain(weight, indices)
else:
weight = on
|
es.copy()
grad = ones.copy()
if args.dense_state:
mean = ones.copy()
else:
mean = ones.tostype('row_sparse')
var = mean.copy()
# warmup
for i in range(10):
adam_update(weight, grad, mean, var, out=weight, lr=1, wd=0, beta1=0.9,
beta2=0.99, rescale_grad=0.5, epsilon=1e-8)
weight.wait_to_read()
# measure speed
a = time.time()
for i in range(args.repeat):
adam_update(weight, grad, mean, var, out=weight, lr=1, wd=0, beta1=0.9,
beta2=0.99, rescale_grad=0.5, epsilon=1e-8)
weight.wait_to_read()
b = time.time()
print(b - a)
|
gkabbe/cMDLMC
|
mdlmc/LMC/output.py
|
Python
|
gpl-3.0
| 1,858
| 0.004306
|
# coding=utf-8
import numpy as np
class CovalentAutocorrelation:
def __init__(self, lattice):
self.reset(lattice)
def reset(self, lattice):
self.lattice = lattice.copy()
def calculate(self, lattice):
return np.sum((lattice == self.lattice) & (lattice != 0))
class MeanSquareDisplacement:
def __init__(self, atom_positions, lattice, atombox):
proton_number = np.sum(lattice >
|
0)
self.snapshot = np.zeros((proton_number, 3))
self.displacement = np.zeros_like(self.snapshot)
self.snapshot = self.determine_proton_positions(atom_positions, lattice)
self.atombox = atombox
def determine_proton_positions(self, atom_positions, lattice):
proton_positions = np.zeros_like(self.snapshot)
proton_labels = lattice[lattice > 0]
site_idx, = np.where(lattice)
proton_positions[proton_labels - 1] = atom_positions[site_
|
idx]
return proton_positions
def update_proton_positions(self, atom_positions, lattice):
self.snapshot[:] = self.determine_proton_positions(atom_positions, lattice)
def update_displacement(self, new_positions, lattice):
"""Update the current position of each proton while considering periodic boundaries.
This assumes that the trajectory time step is small enough that no proton ever moves
more than half of the periodic box length within one step."""
new_proton_positions = self.determine_proton_positions(new_positions, lattice)
displacement = self.atombox.distance(self.snapshot, new_proton_positions)
self.displacement += displacement
self.snapshot = new_proton_positions
def reset_displacement(self):
self.displacement[:] = 0
def msd(self):
return np.sum(self.displacement**2, axis=0) / self.displacement.shape[0]
|
natefoo/pulsar
|
tools/bootstrap_history.py
|
Python
|
apache-2.0
| 3,345
| 0.001794
|
#!/usr/bin/env python
# Little script to make HISTORY.rst more easy to format properly, lots TODO
# pull message down and embed, use arg parse, handle multiple, etc...
import os
import sys
try:
import requests
except ImportError:
requests = None
import urllib.parse
import textwrap
PROJECT_DIRECTORY = os.path.join(os.path.dirname(__file__), "..")
new_path = [PROJECT_DIRECTORY]
new_path.extend( sys.path[1:] ) # remove scripts/ from the path
sys.path = new_path
import pulsar as project
PROJECT_OWNER = project.PROJECT_OWNER
PROJECT_NAME = project.PROJECT_NAME
PROJECT_URL = "https://github.com/%s/%s" % (PROJECT_OWNER, PROJECT_NAME)
PROJECT_API = "https://api.github.com/repos/%s/%s/" % (PROJECT_OWNER, PROJECT_NAME)
AUTHORS_SKIP_CREDIT = ["jmchilton"]
def main(argv):
history_path = os.path.join(PROJECT_DIRECTORY, "HISTORY.rst")
history = open(history_path, "r", encoding="utf-8").read()
def extend(from_str, line):
from_str += "\n"
return history.replace(from_str, from_str + line + "\n")
ident = argv[1]
message = ""
if len(argv) > 2:
message = argv[2]
elif not (ident.startswith("pr") or ident.startswith("issue")):
api_url = urllib.parse.urljoin(PROJECT_API, "commits/%s" % ident)
req = requests.get(api_url).json()
commit = req["commit"]
message = commit["message"]
message = get_first_sentence(message)
elif requests is not None and ident.startswith("pr"):
pull_request = ident[len("pr"):]
api_u
|
rl = urllib.parse.urljoin(PROJECT_API, "pulls/%s" % pull_request)
req = requests.get(api_url).json()
m
|
essage = req["title"]
login = req["user"]["login"]
if login not in AUTHORS_SKIP_CREDIT:
message = message.rstrip(".")
message += " (thanks to `@%s`_)." % req["user"]["login"]
elif requests is not None and ident.startswith("issue"):
issue = ident[len("issue"):]
api_url = urllib.parse.urljoin(PROJECT_API, "issues/%s" % issue)
req = requests.get(api_url).json()
message = req["title"]
else:
message = ""
to_doc = message + " "
if ident.startswith("pr"):
pull_request = ident[len("pr"):]
text = ".. _Pull Request {0}: {1}/pull/{0}".format(pull_request, PROJECT_URL)
history = extend(".. github_links", text)
to_doc += "`Pull Request {0}`_".format(pull_request)
elif ident.startswith("issue"):
issue = ident[len("issue"):]
text = ".. _Issue {0}: {1}/issues/{0}".format(issue, PROJECT_URL)
history = extend(".. github_links", text)
to_doc += "`Issue {0}`_".format(issue)
else:
short_rev = ident[:7]
text = ".. _{0}: {1}/commit/{0}".format(short_rev, PROJECT_URL)
history = extend(".. github_links", text)
to_doc += "{0}_".format(short_rev)
to_doc = wrap(to_doc)
history = extend(".. to_doc", to_doc)
open(history_path, "w", encoding="utf-8").write(history)
def get_first_sentence(message):
first_line = message.split("\n")[0]
return first_line
def wrap(message):
wrapper = textwrap.TextWrapper(initial_indent="* ")
wrapper.subsequent_indent = ' '
wrapper.width = 78
return "\n".join(wrapper.wrap(message))
if __name__ == "__main__":
main(sys.argv)
|
xtao/code
|
vilya/libs/emoji.py
|
Python
|
bsd-3-clause
| 8,756
| 0.000457
|
#!/usr/bin/env python
# coding=utf-8
import re
import os
from cgi import escape
EMOJIS = [
':Aquarius:', ':two_women_in_love:', ':bus_stop:', ':speak_no_evil_monkey:',
':chicken:', ':heart_eyes:', ':Scorpius:', ':smiley_confused:',
':cat_face_with_wry_smile:', ':GAKUEngine:', ':pistol:', ':relieved:',
':wink:', ':grimacing:', ':rainbow_solid:', ':blowfish:',
':kissing_smiling_eyes:', ':tropical_drink:', ':face_with_medical_mask:',
':pill:', ':ruby:', ':cactus:', ':smiley_stuck_out_tongue_winking_eye:',
':boar:', ':smile:', ':face_with_tear_of_joy:', ':Cancer:',
':couple_in_love:', ':horse:', ':two_men_with_heart:', ':bowtie:',
':open_mouth:', ':frog_face:', ':Taurus:', ':octopus:', ':ship:',
':shooting_star:', ':face_with_ok_gesture:', ':wolf_face:', ':heart:',
':loudly_crying_face:', ':frowning:', ':scuba_diver:', ':love_hotel:',
':gentleman_octopus:', ':grinning_cat_face_with_smiling_eyes:',
':face_savouring_delicious_food:', ':rainbow:', ':mount_fuji:',
':victory_hand:', ':glowing_star:', ':ksroom:', ':beer_mug:', ':sweat:',
':hushed:', ':Pisces:', ':Capricorn:', ':stuck_out_tongue_winking_eye:',
':tennis_racquet_and_ball:', ':person_frowning:', ':spouting_whale:',
|
':tangerine:', ':person_bowing_deeply:', ':stuck_out_tongue_closed_eyes:',
':dog_face:', ':circled_ideograph_secret:', ':Libra:', ':jumping_spider:',
':disappointed_face:', ':hamburger:', ':octocat:', ':sleeping:',
':crescent_moon:', ':no_one_under_eighteen_symbol:', ':kissing:',
':unamused:', ':couple
|
_with_heart:', ':fisted_hand_sign:',
':smiling_cat_face_with_heart_shaped_eyes:', ':anguished:', ':groupme:',
':expressionless:', ':phone_book:', ':full_moon:', ':bactrian_camel:',
':snowboarder:', ':microphone:', ':Gemini:', ':fearful_face:',
':pensive_face:', ':jack_o_lantern:', ':Aries:', ':palm_pre3:',
':speech_balloon:', ':koala:', ':poop:', ':quoll:', ':kissing_closed_eyes:',
':thumbs_up_sign:', ':person_with_folded_hands:', ':puke_finger:',
':Giorgio:', ':princess:', ':waxing_gibbous_moon:', ':two_men_in_love:',
':happijar:', ':guitar:', ':sun_with_face:', ':RV:', ':cloud:',
':grinning:', ':genshin:', ':Sagittarius:',
':disappointed_but_relieved_face:', ':paw_prints:', ':rice_ball:',
':anchor:', ':smirk:', ':pegasus_black:', ':lgtm:', ':persevering_face:',
':elephant:', ':face_with_no_good_gesture:', ':snake:', ':wink2:',
':pizza:', ':white_smiling_face:', ':Leo:', ':sunrise_over_mountains:',
':monster:', ':relaxed:', ':grin:', ':laughing:', ':car:', ':cake:',
':Kagetsuki:', ':ninja:', ':siamese_kitten:', ':weary_face:', ':ghost:',
':milky_way:', ':penguin:', ':drunk:', ':crying_cat_face:', ':dancer:',
':snail:', ':person_raising_both_hands_in_celebration:', ':smiley:',
':penguin_chick:', ':video_game:', ':flushed:', ':shit:', ':worried:',
':cyclone:', ':DSLR_click:', ':jumping_spider_red:', ':ocean_dive_view:',
':astonished_face:', ':happy_person_raising_one_hand:', ':bgok:',
':family:', ':smiley_smile:', ':wheelchair:', ':Happy_FMC:',
':smiley_kissing_heart:', ':hatching_chick:', ':hear_no_evil_monkey:',
':Virgo:', ':skull:', ':two_women_holding_hands:', ':assault_rifle:',
':pouting_face:', ':high_hopes:', ':angry_face:'
]
EMOJIONE = [
':2714:', ':2716:', ':274C:', ':274E:', ':2753:', ':2754:', ':2755:',
':2757:', ':2764:', ':303D:', ':1F401:', ':1F402:', ':1F403:', ':1F404:',
':1F405:', ':1F406:', ':1F407:', ':1F409:', ':1F410:', ':1F411:',
':1F412:', ':1F413:', ':1F414:', ':1F415:', ':1F417:', ':1F418:',
':1F419:', ':1F420:', ':1F421:', ':1F422:', ':1F423:', ':1F425:',
':1F426:', ':1F427:', ':1F428:', ':1F429:', ':1F430:', ':1F431:',
':1F433:', ':1F434:', ':1F435:', ':1F436:', ':1F437:', ':1F438:',
':1F439:', ':1F493:', ':1F494:', ':1F495:', ':1F496:', ':1F497:',
':1F498:', ':1F499:', ':1F590:', ':1F591:', ':1F592:', ':1F593:',
':1F594:', ':1F595:', ':1F596:', ':1F598:', ':1F599:', ':1F59E:',
':1F59F:', ':1F600:', ':1F601:', ':1F602:', ':1F604:', ':1F605:',
':1F606:', ':1F607:', ':1F608:', ':1F609:', ':1F60A:', ':1F60C:',
':1F60D:', ':1F60E:', ':1F60F:', ':1F610:', ':1F611:', ':1F612:',
':1F614:', ':1F615:', ':1F616:', ':1F617:', ':1F618:', ':1F619:',
':1F61A:', ':1F61C:', ':1F61D:', ':1F61E:', ':1F61F:', ':1F620:',
':1F621:', ':1F622:', ':1F624:', ':1F625:', ':1F626:', ':1F627:',
':1F628:', ':1F629:', ':1F62A:', ':1F62C:', ':1F62D:', ':1F62E:',
':1F62F:', ':1F630:', ':1F631:', ':1F632:', ':1F634:', ':1F635:',
':1F636:', ':1F637:', ':1F638:', ':1F639:', ':1F63A:', ':1F63C:',
':1F63D:', ':1F63E:', ':1F63F:', ':1F640:', ':1F641:', ':1F642:',
':1F646:', ':1F647:', ':1F648:', ':1F649:', ':1F64A:', ':1F64B:',
':1F64C:', ':2049:', ':261D:', ':263A:', ':2705:', ':270A:', ':270B:',
':270C:', ':270F:', ':2716:', ':274C:', ':274E:', ':2753:', ':2754:',
':2755:', ':2757:', ':2764:'
]
TWEMOJI = [
':1f400:', ':1f401:', ':1f402:', ':1f403:', ':1f404:', ':1f405:',
':1f406:', ':1f407:', ':1f408:', ':1f409:', ':1f410:', ':1f411:',
':1f412:', ':1f413:', ':1f414:', ':1f415:', ':1f416:', ':1f417:',
':1f418:', ':1f419:', ':1f420:', ':1f421:', ':1f422:', ':1f423:',
':1f424:', ':1f425:', ':1f426:', ':1f427:', ':1f428:', ':1f429:',
':1f430:', ':1f431:', ':1f432:', ':1f433:', ':1f434:', ':1f435:',
':1f436:', ':1f437:', ':1f438:', ':1f439:', ':1f440:', ':1f445:',
':1f446:', ':1f447:', ':1f448:', ':1f449:', ':1f450:', ':1f600:',
':1f601:', ':1f602:', ':1f603:', ':1f604:', ':1f605:', ':1f606:',
':1f607:', ':1f608:', ':1f609:', ':1f60a:', ':1f60b:', ':1f60c:',
':1f60d:', ':1f60e:', ':1f60f:', ':1f610:', ':1f611:', ':1f612:',
':1f613:', ':1f614:', ':1f615:', ':1f616:', ':1f617:', ':1f618:',
':1f619:', ':1f61a:', ':1f61b:', ':1f61c:', ':1f61d:', ':1f61e:',
':1f61f:', ':1f620:', ':1f621:', ':1f622:', ':1f623:', ':1f624:',
':1f625:', ':1f626:', ':1f627:', ':1f628:', ':1f629:', ':1f62a:',
':1f62b:', ':1f62c:', ':1f62d:', ':1f62e:', ':1f62f:', ':1f630:',
':1f631:', ':1f632:', ':1f633:', ':1f634:', ':1f635:', ':1f636:',
':1f637:', ':1f638:', ':1f639:', ':1f63a:', ':1f63b:', ':1f63c:',
':1f63d:', ':1f63e:', ':1f63f:', ':1f640:', ':1f645:', ':1f646:',
':1f647:', ':1f648:', ':1f649:', ':1f64a:', ':1f64f:', ':1f680:',
':1f681:', ':1f682:'
]
EMOJI_GROUPS = {}
def parse_emoji_groups(text):
groups = set(RE_EMOJI_GROUPS.findall(text))
for group in groups:
group_text = EMOJI_GROUPS[group]
group_text = group_text.replace(' ', ' ')
group_text = group_text.replace('\n', "<br/>")
text = text.replace(group, group_text)
return text
def parse_emoji(text, is_escape=True):
if not text:
return ''
if is_escape:
text = escape(text)
text = parse_emoji_groups(text)
if RE_EMOJI_ONLY.match(text.strip()):
quick_emoji = True
emoji_img = '<img src="/static/emoji/%s.png" align="absmiddle"/>'
else:
quick_emoji = False
emoji_img = '<img src="/static/emoji/%s.png" height="20" width="20" align="absmiddle"/>'
def translate_emoji(x):
text = x.group()
line_emoji_img = '<img src="/static/emoji/%s.png" height="48" width="48" align="absmiddle"/>'
if not quick_emoji and RE_EMOJI_LINE.match(text):
return line_emoji_img % text.strip(':')
return emoji_img % text.strip(':')
result = RE_EMOJI.sub(translate_emoji, text)
return result
def all_emojis():
curdir = os.path.abspath(os.path.curdir)
emoji_dir = os.path.join(curdir, 'hub/static/emoji/')
if os.path.isdir(emoji_dir):
files = os.listdir(emoji_dir)
else:
realpath = os.path.dirname(os.path.realpath(__file__))
curdir = os.path.join(realpath, os.path.pardir, 'hub/static/emoji')
curdir = os.path.abspath(curdir)
if os.path.isdir(curdir):
files = os.listdir(emoji_dir)
else:
return EMOJIS
if files:
return [':{}:'.format(fn[:-4]) for fn in files if fn.end
|
bsipocz/ginga
|
ginga/misc/plugins/FBrowserBase.py
|
Python
|
bsd-3-clause
| 5,421
| 0.003689
|
#
# FBrowserBase.py -- Base class for file browser plugin for fits viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import os, glob
import stat, time
from ginga.misc import Bunch
from ginga import GingaPlugin
from ginga import AstroImage
from ginga.util import paths
from ginga.util.six.moves import map, zip
class FBrowserBase(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(FBrowserBase, self).__init__(fv, fitsimage)
self.keywords = ['OBJECT', 'UT']
self.columns = [('Name', 'name'),
('Size', 'st_size'),
('Mode', 'st_mode'),
('Last Changed', 'st_mtime')
]
self.jumpinfo = []
homedir = paths.home
self.curpath = os.path.join(homedir, '*')
self.do_scanfits = False
self.moving_cursor = False
def close(self):
chname = self.fv.get_channelName(self.fitsimage)
self.fv.stop_local_plugin(chname, str(self))
return True
def file_icon(self, bnch):
if bnch.type == 'dir':
pb = self.folderpb
elif bnch.type == 'fits':
pb = self.fitspb
else:
pb = self.filepb
return pb
def open_file(self, path):
self.logger.debug("path: %s" % (path))
if path == '..':
curdir, curglob = os.path.split(self.curpath)
path = os.path.join(curdir, path, curglob)
if os.path.isdir(path):
path = os.path.join(path, '*')
self.browse(path)
elif os.path.exists(path):
#self.fv.load_file(path)
uri = "file://%s" % (path)
self.fitsimage.make_callback('drag-drop', [uri])
else:
self.browse(path)
def get_info(self, path):
dirname, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
ftype = 'file'
if os.path.isdir(path):
ftype = 'dir'
elif os.path.islink(path):
ftype = 'link'
elif ext.lower() == '.fits':
ftype = 'fits'
try:
filestat = os.stat(path)
bnch = Bunch.Bunch(path=path, name=filename, type=ftype,
st_mode=filestat.st_mode, st_size=filestat.st_size,
st_mtime=filestat.st_mtime)
except OSError as e:
# TODO: identify some kind of error with this path
bnch = Bunch.Bunch(path=path, name=filename, type=ftype,
st_mode=0, st_size=0,
st_mtime=0)
|
return bnch
def browse(self, path):
self.logger.debug("path: %s" % (path))
if os.path.isdir(path):
dirname = path
globname = None
else:
dirname, globname = os.path.split(path)
dirname = os.path.abspath(dirname)
# check validity of leading path name
if not os.path.isdir(dir
|
name):
self.fv.show_error("Not a valid path: %s" % (dirname))
return
if not globname:
globname = '*'
path = os.path.join(dirname, globname)
# Make a directory listing
self.logger.debug("globbing path: %s" % (path))
filelist = list(glob.glob(path))
filelist.sort(key=str.lower)
filelist.insert(0, os.path.join(dirname, '..'))
self.jumpinfo = list(map(self.get_info, filelist))
self.curpath = path
if self.do_scanfits:
self.scan_fits()
self.makelisting(path)
def scan_fits(self):
for bnch in self.jumpinfo:
if not bnch.type == 'fits':
continue
if 'kwds' not in bnch:
try:
in_f = AstroImage.pyfits.open(bnch.path, 'readonly')
try:
kwds = {}
for kwd in self.keywords:
kwds[kwd] = in_f[0].header.get(kwd, 'N/A')
bnch.kwds = kwds
finally:
in_f.close()
except Exception as e:
continue
def refresh(self):
self.browse(self.curpath)
def scan_headers(self):
self.browse(self.curpath)
def make_thumbs(self):
path = self.curpath
self.logger.info("Generating thumbnails for '%s'..." % (
path))
filelist = glob.glob(path)
filelist.sort(key=str.lower)
# find out our channel
chname = self.fv.get_channelName(self.fitsimage)
# Invoke the method in this channel's Thumbs plugin
# TODO: don't expose gpmon!
rsobj = self.fv.gpmon.getPlugin('Thumbs')
self.fv.nongui_do(rsobj.make_thumbs, chname, filelist)
def start(self):
self.win = None
self.browse(self.curpath)
def pause(self):
pass
def resume(self):
pass
def stop(self):
pass
def redo(self):
return True
#END
|
citrix-openstack-build/cinder
|
cinder/tests/test_solidfire.py
|
Python
|
apache-2.0
| 9,057
| 0.00011
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume.drivers.solidfire import SolidFire
LOG = logging.getLogger(__name__)
class SolidFireVolumeTestCase(test.TestCase):
def setUp(self):
super(SolidFireVolumeTestCase, self).setUp()
def fake_issue_api_request(obj, method, params):
if method is 'GetClusterInfo':
LOG.info('Called Fake GetClusterInfo...')
results = {'result': {'clusterInfo':
{'name': 'fake-cluster',
'mvip': '1.1.1.1',
'svip': '1.1.1.1',
'uniqueID': 'unqid',
'repCount': 2,
'attributes': {}}}}
return results
elif method is 'AddAccount':
LOG.info('Called Fake AddAccount...')
return {'result': {'accountID': 25}, 'id': 1}
elif method is 'GetAccountByName':
LOG.info('Called Fake GetAccountByName...')
results = {'result': {'account':
{'accountID': 25,
'username': params['username'],
'status': 'active',
'initiatorSecret': '123456789012',
'targetSecret': '123456789012',
'attributes': {},
'volumes': [6, 7, 20]}},
"id": 1}
return results
elif method is 'CreateVolume':
LOG.info('Called Fake CreateVolume...')
return {'result': {'volumeID': 5}, 'id': 1}
elif method is 'DeleteVolume':
LOG.info('Called Fake DeleteVolume...')
return {'result': {}, 'id': 1}
elif method is 'ListVolumesForAccount':
test_name = 'OS-VOLID-a720b3c0-d1f0-11e1-9b23-0800200c9a66'
LOG.info('Called Fake ListVolumesForAccount...')
result = {'result': {
'volumes': [{'volumeID': 5,
'name': test_name,
'accountID': 25,
'sliceCount': 1,
'totalSize': 1048576 * 1024,
'enable512e': True,
'access': "readWrite",
'status': "active",
'attributes':None,
'qos': None,
'iqn': test_name}]}}
return result
else:
LOG.error('Crap, unimplemented API call in Fake:%s' % method)
def fake_issue_api_request_fails(obj, method, params):
return {'error': {'code': 000,
'name': 'DummyError',
'message': 'This is a fake error response'},
'id': 1}
def fake_set_qos_by_volume_type(self, type_id, ctxt):
return {'minIOPS': 500,
'maxIOPS': 1000,
'burstIOPS': 1000}
def fake_volume_get(obj, key, default=None):
return {'qos': 'fast'}
def test_create_with_qos_type(self):
self.stubs.Set(SolidFire, '_issue_api_request',
self.fake_issue_api_request)
self.stubs.Set(SolidFire, '_set_qos_by_volume_type',
self.fake_set_qos_by_volume_type)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'volume_type_id': 'fast'}
sfv = SolidFire()
model_update = sfv.create_volume(testvol)
def test_create_volume(self):
self.stubs.Set(SolidFire, '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'volume_type_id': None}
sfv = SolidFire()
model_update = sfv.create_volume(testvol)
def test_create_volume_with_qos(self):
preset_qos = {}
preset_qos['qos'] = 'fast'
self.stubs.Set(SolidFire, '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'metadata': [preset_qos],
'volume_type_id': None}
sfv = SolidFire()
model_update = sfv.create_volume(testvol)
def test_create_volume_fails(self):
self.stubs.Set(SolidFire, '_issue_api_request',
self.fake_issue_api_request_fails)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
sfv = SolidFire()
t
|
ry:
sfv.create_volume(testvol)
self.fail("Should have thrown Error")
except Exception:
pass
def test_create_sfaccount(self):
sfv = SolidFire()
self.stubs.Set(SolidFire, '_issue_api_request',
self.fake_issue_api_request)
account = sfv._create_sfaccount('project-id')
self.assertNotEqual(account, None)
def test_create_sfaccount_fails(self):
sfv = SolidFire()
self.stubs.Set(S
|
olidFire, '_issue_api_request',
self.fake_issue_api_request_fails)
account = sfv._create_sfaccount('project-id')
self.assertEqual(account, None)
def test_get_sfaccount_by_name(self):
sfv = SolidFire()
self.stubs.Set(SolidFire, '_issue_api_request',
self.fake_issue_api_request)
account = sfv._get_sfaccount_by_name('some-name')
self.assertNotEqual(account, None)
def test_get_sfaccount_by_name_fails(self):
sfv = SolidFire()
self.stubs.Set(SolidFire, '_issue_api_request',
self.fake_issue_api_request_fails)
account = sfv._get_sfaccount_by_name('some-name')
self.assertEqual(account, None)
def test_delete_volume(self):
self.stubs.Set(SolidFire, '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'test_volume',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
sfv = SolidFire()
model_update = sfv.delete_volume(testvol)
def test_delete_volume_fails_no_volume(self):
self.stubs.Set(SolidFire, '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'no-name',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
sfv = SolidFire()
try:
model_update = sfv.delete_volume(testvol)
self.fail("Should have thrown Error")
except Exception:
pass
def test_delete_volume_fails_account_lookup(self):
self.stubs.Set(SolidFire, '_issue_api_request',
|
grahamgilbert/Crypt-Server
|
server/migrations/0018_auto_20201029_2134.py
|
Python
|
apache-2.0
| 873
| 0
|
# Generated by Django 2.2.13 on 2020-10-29 21:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("server", "0017_merge_20181217_1829")]
operations = [
migrations.AlterField(
model_name="computer",
|
name="serial",
field=models.CharField(
max_length=200, unique=True, verbose_name="Serial Number"
),
),
migrations.AlterField(
model_name="secret",
name="secret_type",
field=models.Ch
|
arField(
choices=[
("recovery_key", "Recovery Key"),
("password", "Password"),
("unlock_pin", "Unlock PIN"),
],
default="recovery_key",
max_length=256,
),
),
]
|
PetePriority/home-assistant
|
homeassistant/components/device_tracker/tplink.py
|
Python
|
apache-2.0
| 16,061
| 0
|
"""
Support for TP-Link routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.tplink/
"""
import base64
from datetime import datetime
import hashlib
import logging
import re
from aiohttp.hdrs import (
ACCEPT, COOKIE, PRAGMA, REFERER, CONNECTION, KEEP_ALIVE, USER_AGENT,
CONTENT_TYPE, CACHE_CONTROL, ACCEPT_ENCODING, ACCEPT_LANGUAGE)
import requests
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME, HTTP_HEADER_X_REQUESTED_WITH)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['tplink==0.2.1']
_LOGGER = logging.getLogger(__name__)
HTTP_HEADER_NO_CACHE = 'no-cache'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string
})
def get_scanner(hass, config):
"""
Validate the configuration and return a TP-Link scanner.
The default way of integrating devices is to use a pypi
package, The TplinkDeviceScanner has been refactored
to depend on a pypi package, the other implementations
should be gradually migrated in the pypi package
"""
for cls in [
TplinkDeviceScanner, Tplink5DeviceScanner, Tplink4DeviceScanner,
Tplink3DeviceScanner, Tplink2DeviceScanner, Tplink1DeviceScanner
]:
scanner = cls(config[DOMAIN])
if scanner.success_init:
return scanner
return None
class TplinkDeviceScanner(DeviceScanner):
"""Queries the router for connected devices."""
def __init__(self, config):
"""Initialize the scanner."""
from tplink.tplink import TpLinkClient
host = config[CONF_HOST]
password = config[CONF_PASSWORD]
username = config[CONF_USERNAME]
self.success_init = False
try:
self.tplink_client = TpLinkClient(
password, host=host, username=username)
self.last_results = {}
self.success_init = self._update_info()
except requests.exceptions.ConnectionError:
_LOGGER.debug("ConnectionError in TplinkDeviceScanner")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results.keys()
def get_device_name(self, device):
"""Get the name of the device."""
return self.last_results.get(device)
def _update_info(self):
"""Ensure the information from the TP-Link router is up to date.
Return boolean if scanning successful.
"""
_LOGGER.info("Loading wireless clients...")
result = self.tplink_client.get_connected_devices()
if result:
self.last_results = result
return True
return False
class Tplink1DeviceScanner(DeviceScanner):
"""This class
|
queries a wireless router running TP-Link firmware."""
def __init__(self, config):
"""Initialize the scanner."""
host = config[CONF_HOST]
username, password = c
|
onfig[CONF_USERNAME], config[CONF_PASSWORD]
self.parse_macs = re.compile('[0-9A-F]{2}-[0-9A-F]{2}-[0-9A-F]{2}-' +
'[0-9A-F]{2}-[0-9A-F]{2}-[0-9A-F]{2}')
self.host = host
self.username = username
self.password = password
self.last_results = {}
self.success_init = False
try:
self.success_init = self._update_info()
except requests.exceptions.ConnectionError:
_LOGGER.debug("ConnectionError in Tplink1DeviceScanner")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results
def get_device_name(self, device):
"""Get firmware doesn't save the name of the wireless device."""
return None
def _update_info(self):
"""Ensure the information from the TP-Link router is up to date.
Return boolean if scanning successful.
"""
_LOGGER.info("Loading wireless clients...")
url = 'http://{}/userRpm/WlanStationRpm.htm'.format(self.host)
referer = 'http://{}'.format(self.host)
page = requests.get(
url, auth=(self.username, self.password),
headers={REFERER: referer}, timeout=4)
result = self.parse_macs.findall(page.text)
if result:
self.last_results = [mac.replace("-", ":") for mac in result]
return True
return False
class Tplink2DeviceScanner(Tplink1DeviceScanner):
"""This class queries a router with newer version of TP-Link firmware."""
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results.keys()
def get_device_name(self, device):
"""Get firmware doesn't save the name of the wireless device."""
return self.last_results.get(device)
def _update_info(self):
"""Ensure the information from the TP-Link router is up to date.
Return boolean if scanning successful.
"""
_LOGGER.info("Loading wireless clients...")
url = 'http://{}/data/map_access_wireless_client_grid.json' \
.format(self.host)
referer = 'http://{}'.format(self.host)
# Router uses Authorization cookie instead of header
# Let's create the cookie
username_password = '{}:{}'.format(self.username, self.password)
b64_encoded_username_password = base64.b64encode(
username_password.encode('ascii')
).decode('ascii')
cookie = 'Authorization=Basic {}' \
.format(b64_encoded_username_password)
response = requests.post(
url, headers={REFERER: referer, COOKIE: cookie},
timeout=4)
try:
result = response.json().get('data')
except ValueError:
_LOGGER.error("Router didn't respond with JSON. "
"Check if credentials are correct.")
return False
if result:
self.last_results = {
device['mac_addr'].replace('-', ':'): device['name']
for device in result
}
return True
return False
class Tplink3DeviceScanner(Tplink1DeviceScanner):
"""This class queries the Archer C9 router with version 150811 or high."""
def __init__(self, config):
"""Initialize the scanner."""
self.stok = ''
self.sysauth = ''
super(Tplink3DeviceScanner, self).__init__(config)
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
self._log_out()
return self.last_results.keys()
def get_device_name(self, device):
"""Get the firmware doesn't save the name of the wireless device.
We are forced to use the MAC address as name here.
"""
return self.last_results.get(device)
def _get_auth_tokens(self):
"""Retrieve auth tokens from the router."""
_LOGGER.info("Retrieving auth tokens...")
url = 'http://{}/cgi-bin/luci/;stok=/login?form=login' \
.format(self.host)
referer = 'http://{}/webpages/login.html'.format(self.host)
# If possible implement RSA encryption of password here.
response = requests.post(
url, params={'operation': 'login', 'username': self.username,
'password': self.password},
headers={REFERER: referer}, timeout=4)
try:
self.stok = response.json().get('data').get('stok')
_LOGGER.info(self.stok)
regex_result = re.search(
'sysauth=(.*);', response.headers['set-cookie'])
self.sysauth = regex_result.group(1)
_LOGGER.info(se
|
bobthebutcher/iosxe
|
tests/test_iosxe.py
|
Python
|
gpl-3.0
| 1,026
| 0.003899
|
"""
Tests are performed against csr1000v-universalk9.03.15.00.S.155-2.S-std.
"""
import unittest
from iosxe.iosxe import IOSXE
from iosxe.exceptions import AuthError
node = '172.16.92.134'
username = 'cisco'
password = 'cisco'
port = 55443
class TestIOSXE(unittest.TestCase):
def setUp(self):
self.xe = IOSXE(node=node, username=username, password=password, disable_warnings=True)
def test_iosxe_is_a_IOSXE
|
(self):
self.assertIsInstance(self.xe, IOSXE)
def test_invalid_user_pass_returns_auth_error(self):
self.assertRaises(AuthError, IOSXE, node=node, username='stuff', password='things',
disable_warnings=True)
def test_url_base(self):
self.assertEqual(self.xe.url_base, 'https://{0}:{1}/api/v1'.format(node, port))
def test_token_uri(self):
self.assertEqual(se
|
lf.xe.token_uri, '/auth/token-services')
def test_save_config_success(self):
resp = self.xe.save_config()
self.assertEqual(204, resp.status_code)
|
Codex-/Octo-SFTP
|
OctoSFTP/file.py
|
Python
|
mit
| 4,433
| 0.000451
|
import glob
import logging
import os
import shutil
from threading import Thread, Lock
class ClientFiles:
"""Processes the clients files to be moved etc"""
def __init__(self, settings, clients):
self.settings = settings
self.clients = clients
# Logging
self._logger = logging.getLogger(__name__)
# Dictionary of files to be moved, client is key
self.client_files = dict()
self.client_list = []
# Path templates
self.file_path = ("\\\\{0}\\" +
self.settings.client_path +
"\\" +
"*")
# Threading lock
self.lock = Lock()
def file_list(self, client):
"""
Creates a list of files for each client to be moved
:param client: Client to have file list generated
:return: Client files if any, otherwise returns None
"""
# print("Checking " + client + " for files")
client_files = []
for file_type in self.settings.file_types:
client_files.extend(glob.glob(self.file_path.format(client) +
file_type))
if len(client_files) > 0:
client_files = [file for file in client_files
if os.stat(file).st_size >=
self.settings.file_minsize]
if len(client_files) > 0:
self._logger.log(logging.INFO, client + ": " +
str(len(client_files)) + " file(s)")
# print(client, client_files)
self.client_files[client] = client_files
def thread_file_list(self):
"""
Threadsafe queue support function to prevent conflicts
"""
client_list = self.clients.clients_online
while len(client_list) > 0:
self.lock.acquire()
client = client_list.pop()
self.lock.release()
# Pass popped client to function
self.file_list(client)
def build_file_list(self):
"""
Processes clients to construct class file list
"""
active_threads = []
# Spawn instances for multithreading
for i in range(self.settings.client_connections):
instance = Thread(target=self.thread_file_list)
active_threads.append(instance)
instance.start()
# Allow threads to complete before proceeding
for instance in active_threads:
instance.join()
def thread_move_files(self):
while len(self.client_list) > 0:
self.lock.acquire()
client = self.client_list.pop()
self.lock.release()
# Pass popped client to function
self.move_files(self.client_files[client])
def move_files(self, file_list):
# print(file_list)
for file in file_list:
# print(os.path.basename(file))
os.chdir(self.settings.local_queue)
# Check if file exists locally already
if os.path.exists(os.path.basename(file)):
os.remove(os.path.basename(file))
|
self._logger.log(logging.WARNING, file +
|
" exists. "
"Current file removed")
try:
shutil.move(file, self.settings.local_queue)
# self.files.append(os.path.basename(file))
except OSError as error:
self._logger.log(logging.CRITICAL, file + " " + str(error))
# print(error)
self._logger.log(logging.INFO, file + " moved successfully")
def build_move_files(self):
"""
Move files from client to local store. This is done as a list per client
so as to not flood a specific sites WAN link and cause issues with
stores connectivity.
"""
active_threads = []
self.client_list = list(self.client_files.keys())
for i in range(self.settings.client_connections):
instance = Thread(target=self.thread_move_files)
active_threads.append(instance)
instance.start()
# Allow threads to complete before proceeding
for instance in active_threads:
instance.join()
def run(self):
self.build_file_list()
self.build_move_files()
|
pcu4dros/pandora-core
|
workspace/lib/python3.5/site-packages/pycparser/c_parser.py
|
Python
|
mit
| 63,805
| 0.001489
|
#------------------------------------------------------------------------------
# pycparser: c_parser.py
#
# CParser class: Parser and AST builder for the C language
#
# Copyright (C) 2008-2015, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
import re
from .ply import yacc
from . import c_ast
from .c_lexer import CLexer
from .plyparser import PLYParser, Coord, ParseError
from .ast_transforms import fix_switch_cases
class CParser(PLYParser):
def __init__(
self,
lex_optimize=True,
lexer=CLexer,
lextab='pycparser.lextab',
yacc_optimize=True,
yacctab='pycparser.yacctab',
yacc_debug=False,
taboutputdir=''):
""" Create a new CParser.
Some arguments for controlling the debug/optimization
level of the parser are provided. The defaults are
tuned for release/performance mode.
The simple rules for using them are:
*) When tweaking CParser/CLexer, set these to False
*) When releasing a stable parser, set to True
lex_optimize:
Set to False when you're modifying the lexer.
Otherwise, changes in the lexer won't be used, if
some lextab.py file exists.
When releasing with a stable lexer, set to True
to save the re-generation of the lexer table on
each run.
lexer:
Set this parameter to define the lexer to use if
you're not using the default CLexer.
lextab:
Points to the lex table that's used for optimized
mode. Only if you're modifying the lexer and want
some tests to avoid re-generating the table, make
this point to a local lex table file (that's been
earlier generated with lex_optimize=True)
yacc_optimize:
Set to False when you're modifying the parser.
Otherwise, changes in the parser won't be used, if
some parsetab.py file exists.
When releasing with a stable parser, set to True
to save the re-generation of the parser table on
each run.
yacctab:
Points to the yacc table that's used for optimized
mode. Only if you're modifying the parser, make
this point to a local yacc table file
yacc_debug:
Generate a parser.out file that explains how yacc
built the parsing table from the grammar.
taboutputdir:
Set this parameter to control the location of generated
lextab and yacctab files.
"""
self.clex = lexer(
error_func=self._lex_error_func,
on_lbrace_func=self._lex_on_lbrace_func,
on_rbrace_func=self._lex_on_rbrace_func,
type_lookup_func=self._lex_type_lookup_func)
self.clex.build(
optimize=lex_optimize,
lextab=lextab,
outputdir=taboutputdir)
self.tokens = self.clex.tokens
rules_with_opt = [
'abstract_declarator',
'assignment_expression',
'declaration_list',
'declaration_specifiers',
'designation',
'expression',
'identifier_list',
'init_declarator_list',
'initializer_list',
'parameter_type_list',
'specifier_qualifier_list',
'block_item_list',
'type_qualifier_list',
'struct_declarator_list'
]
for rule in rules_with_opt:
self._create_opt_rule(rule)
self.cparser = yacc.yacc(
module=self,
start='translation_unit_or_empty',
debug=yacc_debug,
optimize=yacc_optimize,
tabmodule=yacctab,
outputdir=taboutputdir)
# Stack of scopes for keeping track of symbols. _scope_stack[-1] is
# the current (topmost) scope. Each scope is a dictionary that
# specifies whether a name is a type. If _scope_stack[n][name] is
# True, 'name' is currently a type in the scope. If it's False,
# 'name' is used in the scope but not as a type (for instance, if we
# saw: int name;
# If 'name' is not a key in _scope_stack[n] then 'name' was not defined
# in this scope at all.
self._scope_stack = [dict()]
# Keeps track of the last token given to yacc (the lookahead token)
self._last_yielded_token = None
def parse(self, text, filename='', debuglevel=0):
""" Parses C code and returns an AST.
text:
A string containing the C source code
filename:
Name of the file being parsed (for meaningful
error messages)
debuglevel:
Debug level to yacc
"""
self.clex.filename = filename
self.clex.reset_lineno()
self._scope_stack = [dict()]
self._last_yielded_token = None
return self.cparser.parse(
input=text,
lexer=self.clex,
debug=debuglevel)
######################-- PRIVATE --######################
def _push_scope(self):
self._scope_stack.append(dict())
def _pop_scope(self):
assert len(self._scope_stack) > 1
self._scope_stack.pop()
def _add_typedef_name(self, name, coord):
""" Add a new typedef name (ie a TYPEID) to the current scope
"""
if not self._scope_stack[-1].get(name, True):
|
self._parse_error(
"Typedef %r previously decl
|
ared as non-typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = True
def _add_identifier(self, name, coord):
""" Add a new object, function, or enum member name (ie an ID) to the
current scope
"""
if self._scope_stack[-1].get(name, False):
self._parse_error(
"Non-typedef %r previously declared as typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = False
def _is_type_in_scope(self, name):
""" Is *name* a typedef-name in the current scope?
"""
for scope in reversed(self._scope_stack):
# If name is an identifier in this scope it shadows typedefs in
# higher scopes.
in_scope = scope.get(name)
if in_scope is not None: return in_scope
return False
def _lex_error_func(self, msg, line, column):
self._parse_error(msg, self._coord(line, column))
def _lex_on_lbrace_func(self):
self._push_scope()
def _lex_on_rbrace_func(self):
self._pop_scope()
def _lex_type_lookup_func(self, name):
""" Looks up types that were previously defined with
typedef.
Passed to the lexer for recognizing identifiers that
are types.
"""
is_type = self._is_type_in_scope(name)
return is_type
def _get_yacc_lookahead_token(self):
""" We need access to yacc's lookahead token in certain cases.
This is the last token yacc requested from the lexer, so we
ask the lexer.
"""
return self.clex.last_token
# To understand what's going on here, read sections A.8.5 and
# A.8.6 of K&R2 very carefully.
#
# A C type consists of a basic type declaration, with a list
# of modifiers. For example:
#
# int *c[5];
#
# The basic declaration here is 'int c', and the pointer and
# the array are the modifiers.
#
# Basic declarations are represented by TypeDecl (from module c_ast) and the
# modifiers are FuncDecl, PtrDecl and ArrayDecl.
#
# The standard states that whenever a new modifier is parsed, it should be
# added to the en
|
tectronics/chimerascan
|
chimerascan/deprecated/nominate_spanning_reads_v01.py
|
Python
|
gpl-3.0
| 5,222
| 0.003447
|
'''
Created on Jan 30, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import collections
import logging
from nominate_chimeras import Chimera, parse_discordant_reads
from find_discordant_reads import DiscordantType
def to_fastq(mate, qname, seq, qual):
return "@%s/%d\n%s\n+%s/%d\n%s" % (qname, mate+1, seq, qname, mate+1, qual)
def is_spanning(start, end, juncs):
return any(start < j < end for j in juncs)
def check_fragment(frag, tx5p, tx3p, nonmapping=True):
# mates that have two split segments mapping discordantly
# are automatically considered for spanning read detection
write5p = frag.discordant_type.discordant5p
write3p = frag.discordant_type.discordant3p
# check the padding boundaries to find mates with positions
# past the junction
if (not write5p) and (frag.clust5p.rname in tx5p):
write5p = is_spanning(frag.clust5p.pad_start,
frag.clust5p.pad_end,
tx5p[frag.clust5p.rname])
if (not write3p) and (frag.clust3p.rname in tx3p):
write3p = is_spanning(frag.clust3p.pad_start,
frag.clust3p.pad_end,
tx5p[frag.clust3p.rname])
if nonmapping and (frag.discordant_type.code == DiscordantType.NONMAPPING):
# TODO: automatically completely non-mapping reads that may
# be double-overlapping spanning reads, but only do this in
# single-segment mode to increase sensitivity
write5p = True
write3p = True
elif frag.discordant_type.code == DiscordantType.CONCORDANT_SINGLE:
# one of mates mapped and other is unmapped, so check
# the mapped mate and see whether it matches a chimera
# candidate
# TODO: check junction position to further refine decision
# by omitting reads that are far from the predicted junction
if (frag.clust5p.rname == "*") and (frag.clust3p.rname in tx3p):
write5p = True
if (frag.clust3p.rname == "*") and (frag.clust5p.rname in tx5p):
write3p = True
# write the potential spanning reads
reads = [None, None]
if write5p:
mate = 0 if frag.read1_is_sense else 1
reads[mate] = to_fastq(mate, frag.qname, frag.clust5p.seq, frag.clust5p.qual)
if write3p:
mate = 1 if frag.read1_is_sense else 0
reads[mate] = to_fastq(mate, frag.qname, frag.clust3p.seq, frag.clust3p.qual)
return reads
def nominate_spanning_reads(discordant_reads_fh,
chimeras_fh,
fastq_fh):
# build index of chimera candidates
logging.info("Indexing chimera candidates")
tx5p = collections.defaultdict(lambda: [])
tx3p = collections.defaultdict(lambda: [])
for chimera in Chimera.parse(chimeras_fh):
tx5p[chimera.mate5p.tx_name].append(chimera.mate5p.end)
tx3p[chimera.mate3p.tx_name].append(chimera.mate3p.start)
# parse discordant reads
logging.info("Nominating spanning reads")
read1, read2 = None, None
prev_qname = None
for frag in parse_discordant_reads(discordant_reads_fh):
if frag.discordant_type.is_genome:
continue
qname = frag.qname
if prev_qname is not None and (qname != prev_qname):
if read1 is not None:
print >>fastq_fh, read1
if read2 is not None:
print >>fastq_fh, read2
read1, read2 = None, None
# skip if reads already found
if (r
|
ead1 is not None) and (read2 is not None):
continue
# update read fastq
r1, r2 = check_fragment(frag, tx5p, tx3p)
if read1 is None: read1 = r1
if read2 is None: read2 = r2
prev_qname = qname
if read1 is not None:
print >>fastq_fh, read1
if read2
|
is not None:
print >>fastq_fh, read2
def main():
from optparse import OptionParser
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
parser = OptionParser("usage: %prog [options] <qname_sorted_discordant_reads> <chimeras> <output.fq>")
options, args = parser.parse_args()
discordant_reads_file = args[0]
chimeras_file = args[1]
fastq_file = args[2]
nominate_spanning_reads(open(discordant_reads_file, 'r'),
open(chimeras_file, 'r'),
open(fastq_file, 'w'))
if __name__ == '__main__':
main()
|
NESCent/dplace
|
dplace_app/load.py
|
Python
|
mit
| 5,554
| 0.004681
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from itertools import groupby
from time import time
from functools import partial
import re
import django
django.setup()
from django.db import transaction
from clldutils.dsv import reader
from clldutils.text import split_text
from clldutils.path import Path
from clldutils import jsonlib
import attr
from dplace_app.models import Source
from loader.util import configure_logging, load_regions
from loader.society import society_locations, load_societies, load_society_relations
from loader.phylogenies import load_phylogenies
from loader.variables import load_vars
from loader.values import load_data
from loader.sources import load_references
from loader.glottocode import load_languages
comma_split = partial(split_text, separators=',', strip=True, brackets={})
semicolon_split = partial(split_text, separators=';', strip=True, brackets={})
def valid_enum_member(choices, instance, attribute, value):
if value not in choices:
raise ValueError(value)
@attr.s
class Variable(object):
category = attr.ib(convert=lambda s: [c.capitalize() for c in comma_split(s)])
id = attr.ib()
title = attr.ib()
definition = attr.ib()
type = attr.ib(
validator=partial(valid_enum_member, ['Continuous', 'Categorical', 'Ordinal']))
units = attr.ib()
source = attr.ib()
changes = attr.ib()
notes = attr.ib()
codes = attr.ib(default=attr.Factory(list))
@attr.s
class Data(object):
soc_id = attr.ib()
sub_case = attr.ib()
year = attr.ib()
var_id = attr.ib()
code = attr.ib()
comment = attr.ib()
references = attr.ib(convert=semicolon_split)
source_coded_data = attr.ib()
admin_comment = attr.ib()
@attr.s
class ObjectWithSource(object):
id =
|
attr.ib()
name = attr.ib()
year = attr.ib()
author = attr.ib()
reference = attr.ib()
base_dir = attr.ib()
@property
def dir(self):
return self.base_dir.joinpath(self.id)
def as_source(self):
return Source.objects.create(
|
**{k: getattr(self, k) for k in 'year author name reference'.split()})
@attr.s
class RelatedSociety(object):
dataset = attr.ib(convert=lambda s: s.strip())
name = attr.ib(convert=lambda s: s.strip())
id = attr.ib(convert=lambda s: s.strip())
@classmethod
def from_string(cls, s):
match = re.match('([A-Za-z]+):\s*([^\[]+)\[([^\]]+)\]$', s)
if not match:
raise ValueError(s)
return cls(*match.groups())
@attr.s
class RelatedSocieties(object):
id = attr.ib()
related = attr.ib(convert=lambda s: [
RelatedSociety.from_string(ss) for ss in semicolon_split(s)])
@attr.s
class Dataset(ObjectWithSource):
type = attr.ib(validator=partial(valid_enum_member, ['cultural', 'environmental']))
description = attr.ib()
url = attr.ib()
def _items(self, what, **kw):
fname = self.dir.joinpath('{0}.csv'.format(what))
return list(reader(fname, **kw)) if fname.exists() else []
@property
def data(self):
return [Data(**d) for d in self._items('data', dicts=True)]
@property
def references(self):
return self._items('references', namedtuples=True)
@property
def societies(self):
return self._items('societies', namedtuples=True)
@property
def society_relations(self):
return [
RelatedSocieties(**d) for d in self._items('societies_mapping', dicts=True)]
@property
def variables(self):
codes = {vid: list(c) for vid, c in groupby(
sorted(self._items('codes', namedtuples=True), key=lambda c: c.var_id),
lambda c: c.var_id)}
return [
Variable(codes=codes.get(v['id'], []), **v)
for v in self._items('variables', dicts=True)]
@attr.s
class Phylogeny(ObjectWithSource):
scaling = attr.ib()
url = attr.ib()
@property
def trees(self):
return self.dir.joinpath('summary.trees')
@property
def taxa(self):
return list(reader(self.dir.joinpath('taxa.csv'), dicts=True))
class Repos(object):
def __init__(self, dir_):
self.dir = dir_
self.datasets = [
Dataset(base_dir=self.dir.joinpath('datasets'), **r) for r in
reader(self.dir.joinpath('datasets', 'index.csv'), dicts=True)]
self.phylogenies = [
Phylogeny(base_dir=self.dir.joinpath('phylogenies'), **r) for r in
reader(self.dir.joinpath('phylogenies', 'index.csv'), dicts=True)]
def path(self, *comps):
return self.dir.joinpath(*comps)
def read_csv(self, *comps, **kw):
return list(reader(self.path(*comps), **kw))
def read_json(self, *comps):
return jsonlib.load(self.path(*comps))
def load(repos, test=True):
configure_logging(test=test)
repos = Repos(repos)
for func in [
load_societies,
load_society_relations,
load_regions,
society_locations,
load_vars,
load_languages,
load_references,
load_data,
load_phylogenies,
]:
with transaction.atomic():
if not test:
print("%s..." % func.__name__) # pragma: no cover
start = time()
res = func(repos)
if not test: # pragma: no cover
print("{0} loaded in {1:.2f} secs".format(res, time() - start))
if __name__ == '__main__': # pragma: no cover
load(Path(sys.argv[1]), test=False)
sys.exit(0)
|
laufercenter/meld
|
meld/system/protein.py
|
Python
|
mit
| 7,985
| 0.004258
|
#
# Copyright 2015 by Justin MacCallum, Alberto Perez, Ken Dill
# All rights reserved
#
import numpy as np
import math
class ProteinBase(object):
'''
Base class for other Protein classes.
Provides functionality for translation/rotation and adding H-bonds.
'''
def __init__(self):
self._translation_vector = np.zeros(3)
self._rotatation_matrix = np.eye(3)
self._disulfide_list = []
self._general_bond = []
self._prep_files = []
self._frcmod_files = []
self._lib_files = []
def set_translation(self, translation_vector):
'''
Set the translation vector.
:param translation_vector: ``numpy.array(3)`` in nanometers
Translation happens after rotation.
'''
self._translation_vector = np.array(translation_vector)
def set_rotation(self, rotation_axis, theta):
'''
Set the rotation.
:param rotation_axis: ``numpy.array(3)`` in nanometers
:param theta: angle of rotation in degrees
Rotation happens after translation.
'''
theta = theta * 180 / math.pi
rotation_axis = rotation_axis / np.linalg.norm(rotation_axis)
a = np.cos(theta / 2.)
b, c, d = -rotation_axis * np.sin(theta / 2.)
self._rotatation_matrix = np.array([[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)],
[2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)],
[2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]])
def add_bond(self, res_index_i, res_index_j, atom_name_i, atom_name_j, bond_type):
'''
Add a general bond.
:param res_index_i: one-based index of residue i
:param res_index_j: one-based index of residue j
:param atom_name_i: string name of i
:param atom_name_j: string name of j
:param bond_type: string specifying the "S", "D","T"... bond
.. note::
indexing starts from one and the residue numbering from the PDB file is ignored.
'''
self._general_bon
|
d.append((res_index_i, res_index_j,atom_name_i,atom_name_j,bond_type))
def add_disulfide(self, res_index_
|
i, res_index_j):
'''
Add a disulfide bond.
:param res_index_i: one-based index of residue i
:param res_index_j: one-based index of residue j
.. note::
indexing starts from one and the residue numbering from the PDB file is ignored. When loading
from a PDB or creating a sequence, residue name must be CYX, not CYS.
'''
self._disulfide_list.append((res_index_i, res_index_j))
def add_prep_file(self,fname):
'''
Add a prep file.
This will be needed when using residues that
are not defined in the general amber force field
'''
self._prep_files.append(fname)
def add_frcmod_file(self,fname):
'''
Add a frcmod file.
This will be needed when using residues that
are not defined in the general amber force field
'''
self._frcmod_files.append(fname)
def add_lib_file(self,fname):
'''
Add a lib file.
This will be needed when using residues that
are not defined in the general amber force field
'''
self._lib_files.append(fname)
def _gen_translation_string(self, mol_id):
return '''translate {mol_id} {{ {x} {y} {z} }}'''.format(mol_id=mol_id,
x=self._translation_vector[0],
y=self._translation_vector[1],
z=self._translation_vector[2])
def _gen_rotation_string(self, mol_id):
return ''
def _gen_bond_string(self,mol_id):
bond_strings = []
for i,j,a,b,t in self._general_bond:
d = 'bond {mol_id}.{i}.{a} {mol_id}.{j}.{b} "{t}"'.format(mol_id=mol_id, i=i, j=j, a=a, b=b, t=t)
bond_strings.append(d)
return bond_strings
def _gen_disulfide_string(self, mol_id):
disulfide_strings = []
for i, j in self._disulfide_list:
d = 'bond {mol_id}.{i}.SG {mol_id}.{j}.SG'.format(mol_id=mol_id, i=i, j=j)
disulfide_strings.append(d)
return disulfide_strings
def _gen_read_prep_string(self):
prep_string = []
for p in self._prep_files:
prep_string.append('loadAmberPrep {}'.format(p))
return prep_string
def _gen_read_frcmod_string(self):
frcmod_string = []
for p in self._frcmod_files:
frcmod_string.append('loadAmberParams {}'.format(p))
return frcmod_string
def _gen_read_lib_string(self):
lib_string = []
for p in self._lib_files:
lib_string.append('loadoff {}'.format(p))
return lib_string
class ProteinMoleculeFromSequence(ProteinBase):
'''
Class to create a protein from sequence. This class will create a protein molecule from sequence. This class is pretty dumb and relies on AmberTools
to do all of the heavy lifting.
:param sequence: sequence of the protein to create
The sequence is specified in Amber/Leap format. There are special NRES and CRES variants for the N-
and C-termini. Different protonation states are also available via different residue names. E.g. ASH
for neutral ASP.
'''
def __init__(self, sequence):
super(ProteinMoleculeFromSequence, self).__init__()
self._sequence = sequence
def prepare_for_tleap(self, mol_id):
# we don't need to do anything
pass
def generate_tleap_input(self, mol_id):
leap_cmds = []
leap_cmds.append('source leaprc.gaff')
leap_cmds.extend(self._gen_read_frcmod_string())
leap_cmds.extend(self._gen_read_prep_string())
leap_cmds.extend(self._gen_read_lib_string())
leap_cmds.append('{mol_id} = sequence {{ {seq} }}'.format(mol_id=mol_id, seq=self._sequence))
leap_cmds.extend(self._gen_disulfide_string(mol_id))
leap_cmds.extend(self._gen_bond_string(mol_id))
leap_cmds.append(self._gen_rotation_string(mol_id))
leap_cmds.append(self._gen_translation_string(mol_id))
return leap_cmds
class ProteinMoleculeFromPdbFile(ProteinBase):
'''
Create a new protein molecule from a pdb file.
This class is dumb and relies on AmberTools for the heavy lifting.
:param pdb_path: string path to the pdb file
.. note::
no processing happens to this pdb file. It must be understandable by tleap and atoms/residues may
need to be added/deleted/renamed. These manipulations should happen to the file before MELD is invoked.
'''
def __init__(self, pdb_path):
super(ProteinMoleculeFromPdbFile, self).__init__()
with open(pdb_path) as pdb_file:
self._pdb_contents = pdb_file.read()
def prepare_for_tleap(self, mol_id):
# copy the contents of the pdb file into the current working directory
pdb_path = '{mol_id}.pdb'.format(mol_id=mol_id)
with open(pdb_path, 'w') as pdb_file:
pdb_file.write(self._pdb_contents)
def generate_tleap_input(self, mol_id):
leap_cmds = []
leap_cmds.append('source leaprc.gaff')
leap_cmds.extend(self._gen_read_frcmod_string())
leap_cmds.extend(self._gen_read_prep_string())
leap_cmds.extend(self._gen_read_lib_string())
leap_cmds.append('{mol_id} = loadPdb {mol_id}.pdb'.format(mol_id=mol_id))
leap_cmds.extend(self._gen_bond_string(mol_id))
leap_cmds.extend(self._gen_disulfide_string(mol_id))
leap_cmds.append(self._gen_rotation_string(mol_id))
leap_cmds.append(self._gen_translation_string(mol_id))
#print leap_cmds
return leap_cmds
|
peap/gnucash_explorer
|
gnucash_explorer/settings.py
|
Python
|
mit
| 2,089
| 0
|
"""
Django settings for gnucash_explorer project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = None
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.c
|
ontrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMid
|
dleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'gnucash_explorer.urls'
WSGI_APPLICATION = 'gnucash_explorer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
try:
from gnucash_explorer.local_settings import *
except ImportError as e:
print('You should set up your local_settings.py')
|
russellb/nova
|
nova/api/openstack/compute/contrib/virtual_storage_arrays.py
|
Python
|
apache-2.0
| 23,474
| 0.001406
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" The virtul storage array extension"""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.contrib import volumes
from nova.api.openstack import extensions
from nova.api.openstack.compute import servers
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import instance_types
from nova import network
from nova import exception
from nova import flags
from nova import log as logging
from nova import vsa
from nova import volume
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute',
'virtual_storage_arrays')
def _vsa_view(context, vsa, details=False, instances=None):
"""Map keys for vsa summary/detailed view."""
d = {}
d['id'] = vsa.get('id')
d['name'] = vsa.get('name')
d['displayName'] = vsa.get('display_name')
d['displayDescription'] = vsa.get('display_description')
d['createTime'] = vsa.get('created_at')
d['status'] = vsa.get('status')
if 'vsa_instance_type' in vsa:
d['vcType'] = vsa['vsa_instance_type'].get('name', None)
else:
d['vcType'] = vsa['instance_type_id']
d['vcCount'] = vsa.get('vc_count')
d['driveCount'] = vsa.get('vol_count')
d['ipAddress'] = None
for instance in instances:
fixed_addr = None
floating_addr = None
if instance['fixed_ips']:
fixed = instance['fixed_ips'][0]
fixed_addr = fixed['address']
if fixed['floating_ips']:
floating_addr = fixed['floating_ips'][0]['address']
if floating_addr:
d['ipAddress'] = floating_addr
break
else:
d['ipAddress'] = d['ipAddress'] or fixed_addr
return d
def make_vsa(elem):
elem.set('id')
elem.set('name')
elem.set('di
|
splayName')
elem.set('displayDescription')
elem.set('createTime')
elem.set('status')
elem.set('vcType')
elem.set('vcCount')
elem.set('driveCount')
elem.set('ipAddress')
class VsaTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('vsa', selector='vsa')
make_vsa(root)
return xmlutil.MasterTemplate(root, 1)
class VsaSetTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.Temp
|
lateElement('vsaSet')
elem = xmlutil.SubTemplateElement(root, 'vsa', selector='vsaSet')
make_vsa(elem)
return xmlutil.MasterTemplate(root, 1)
class VsaController(object):
"""The Virtual Storage Array API controller for the OpenStack API."""
def __init__(self):
self.vsa_api = vsa.API()
self.compute_api = compute.API()
self.network_api = network.API()
super(VsaController, self).__init__()
def _get_instances_by_vsa_id(self, context, id):
return self.compute_api.get_all(context,
search_opts={'metadata': dict(vsa_id=str(id))})
def _items(self, req, details):
"""Return summary or detailed list of VSAs."""
context = req.environ['nova.context']
authorize(context)
vsas = self.vsa_api.get_all(context)
limited_list = common.limited(vsas, req)
vsa_list = []
for vsa in limited_list:
instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
vsa_list.append(_vsa_view(context, vsa, details, instances))
return {'vsaSet': vsa_list}
@wsgi.serializers(xml=VsaSetTemplate)
def index(self, req):
"""Return a short list of VSAs."""
return self._items(req, details=False)
@wsgi.serializers(xml=VsaSetTemplate)
def detail(self, req):
"""Return a detailed list of VSAs."""
return self._items(req, details=True)
@wsgi.serializers(xml=VsaTemplate)
def show(self, req, id):
"""Return data about the given VSA."""
context = req.environ['nova.context']
authorize(context)
try:
vsa = self.vsa_api.get(context, vsa_id=id)
except exception.NotFound:
raise exc.HTTPNotFound()
instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
return {'vsa': _vsa_view(context, vsa, True, instances)}
@wsgi.serializers(xml=VsaTemplate)
def create(self, req, body):
"""Create a new VSA."""
context = req.environ['nova.context']
authorize(context)
if not body or 'vsa' not in body:
LOG.debug(_("No body provided"), context=context)
raise exc.HTTPUnprocessableEntity()
vsa = body['vsa']
display_name = vsa.get('displayName')
vc_type = vsa.get('vcType', FLAGS.default_vsa_instance_type)
try:
instance_type = instance_types.get_instance_type_by_name(vc_type)
except exception.NotFound:
raise exc.HTTPNotFound()
LOG.audit(_("Create VSA %(display_name)s of type %(vc_type)s"),
locals(), context=context)
args = dict(display_name=display_name,
display_description=vsa.get('displayDescription'),
instance_type=instance_type,
storage=vsa.get('storage'),
shared=vsa.get('shared'),
availability_zone=vsa.get('placement', {}).\
get('AvailabilityZone'))
vsa = self.vsa_api.create(context, **args)
instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
return {'vsa': _vsa_view(context, vsa, True, instances)}
def delete(self, req, id):
"""Delete a VSA."""
context = req.environ['nova.context']
authorize(context)
LOG.audit(_("Delete VSA with id: %s"), id, context=context)
try:
self.vsa_api.delete(context, vsa_id=id)
except exception.NotFound:
raise exc.HTTPNotFound()
def associate_address(self, req, id, body):
""" /zadr-vsa/{vsa_id}/associate_address
auto or manually associate an IP to VSA
"""
context = req.environ['nova.context']
authorize(context)
if body is None:
ip = 'auto'
else:
ip = body.get('ipAddress', 'auto')
LOG.audit(_("Associate address %(ip)s to VSA %(id)s"),
locals(), context=context)
try:
instances = self._get_instances_by_vsa_id(context, id)
if instances is None or len(instances) == 0:
raise exc.HTTPNotFound()
for instance in instances:
self.network_api.allocate_for_instance(context, instance,
vpn=False)
# Placeholder
return
except exception.NotFound:
raise exc.HTTPNotFound()
def disassociate_address(self, req, id, body):
""" /zadr-vsa/{vsa_id}/disassociate_address
auto or manually associate an IP to VSA
"""
context = req.environ['nova.context']
authorize(context)
if body is None:
ip = 'auto'
else:
ip = body.get('ipAddress', 'auto')
LOG.audit(_("Disassociate address from VSA %(id)s"),
locals(), context=context)
# Placeholder
def make_volume(elem):
volumes.make_volume(elem)
elem.set('name')
elem.set('vsaId'
|
google-research/google-research
|
non_semantic_speech_benchmark/export_model/tf_pad.py
|
Python
|
apache-2.0
| 2,326
| 0.007739
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS
|
OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A TensorFlow pad function that works like numpy.
Specifically, tf.pad can't more than double the length of a Tensor, while
numpy's can. For example:
x = np.array(range(3))
np.pad(x, [0, 5], mode='symmetric')
-> array([0, 1, 2, 2, 1, 0, 0, 1])
x = tf
|
.constant(range(3))
tf.pad(x, [(0, 5)], mode='symmetric')
-> fails
"""
from typing import Union
import tensorflow as tf
def tf_pad(samples, padding,
mode):
if samples.shape.ndims != 2:
raise ValueError(f'tensor must be rank 2: {samples.shape}')
if mode == 'SYMMETRIC':
return tf_pad_symmetric(samples, padding)
else:
return tf.pad(samples, [(0, 0), (0, padding)], mode=mode)
def tf_pad_symmetric(tensor, padding):
"""Symmetric pad a 2D Tensor."""
if tensor.shape.ndims != 2:
raise ValueError(f'tensor must be rank 2: {tensor.shape}')
t_len = tf.shape(tensor)[1]
return tf.cond(
padding > t_len,
lambda: _repeat_n_times_with_extra(tensor, padding, t_len),
lambda: tf.pad(tensor, [(0, 0), (0, padding)], mode='SYMMETRIC'))
def _repeat_n_times_with_extra(tensor, padding,
t_len):
"""Pad symmetric longer than the original tensor."""
assert tensor.shape.ndims == 2, tensor.ndims
num_copies = tf.math.floordiv(padding, t_len)
r = tf.reverse(tensor, axis=[1])
f = tf.concat([r, tensor], axis=1)
copies = tf.tile(f, [1, tf.math.floordiv(num_copies, 2)])
copies = tf.cond(
tf.math.mod(num_copies, 2) == 0,
lambda: copies,
lambda: tf.concat([copies, r], axis=1),
)
pre_pad_tensor = tf.concat([tensor, copies], axis=1)
extra = tf.math.mod(padding, t_len)
return tf.pad(pre_pad_tensor, paddings=[(0, 0), (0, extra)], mode='SYMMETRIC')
|
harshilasu/LinkurApp
|
y/google-cloud-sdk/platform/gcutil/lib/google_compute_engine/gcutil_lib/mock_api_parser.py
|
Python
|
gpl-3.0
| 7,467
| 0.006964
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parsing API Discovery document."""
import mock_api_types
from gcutil_lib import mock_api_types
class Parser(object):
"""Discovery document parser.
Parses discovery document types, resources and methods. Result of parsing is a
dictionary method_id -> method.
"""
__slots__ = ('_discovery_document', '_parsed_schemas', '_parsed_methods',
'_base_url', '_common_parameters')
def __init__(self, doc):
self._discovery_document = doc
self._parsed_schemas = {}
self._parsed_methods = {}
self._base_url = ''
self._common_parameters = {}
def _ParseType(self, discovery_type):
ref = discovery_type.get('$ref')
if ref:
return self._GetSchema(ref)
type_name = discovery_type['type']
if type_name == 'any':
return mock_api_types.AnyType()
elif type_name == 'array':
return mock_api_types.ArrayType(self._ParseType(discovery_type['items']))
elif type_name == 'boolean':
return mock_api_types.BooleanType()
elif type_name == 'integer':
return self._ParseIntegerType(discovery_type)
elif type_name == 'number':
return self._ParseNumberType(discovery_type)
elif type_name == 'object':
return self._ParseObjectType(discovery_type)
elif type_name == 'string':
return self._ParseStringType(discovery_type)
else:
raise ValueError('Unrecognized type {type}'.format(type=type_name))
def _ParseIntegerType(self, discovery_type):
value_format = discovery_type.get('format')
if value_format in (None, 'int32', 'uint32'):
return mock_api_types.IntegerType(value_format or 'int32')
raise ValueError('Invalid integer format {value}'.format(
value=value_format))
def _ParseNumberType(self, discovery_type):
value_format = discovery_type.get('format')
if value_format in (None, 'double', 'float'):
return mock_api_types.NumberType(value_format or 'double')
raise ValueError('Invalid number format {value}'.format(
value=value_format))
def _ParseStringType(self, discovery_type):
value_format = discovery_type.get('format')
if value_format in (None, 'byte', 'date', 'date-time', 'int64', 'uint64'):
return mock_api_types.StringType(value_format)
raise ValueError('Invalid string format {value}'.format(
value=value_format))
def _ParseObjectType(self, discovery_type):
properties, additional = self._ParseProperties(discovery_type)
object_type = mock_api_types.ObjectType()
object_type.Define('', properties, additional)
return object_type
def _ParseSchema(self, discovery_schema):
properties, additional = self._ParseProperties(discovery_schema)
return self._CreateSchema(
discovery_schema.get('id'), properties, additional)
def _ParseProperties(self, discover
|
y_object_type):
"""Parses properties of a discovery document object tyoe."""
assert discovery_object_type.get('type') == 'object'
properties = []
for property_name, property_type in (
discovery_object_type.get('properties', {}).iteritems()):
properties.append(mock_api_types.Property(
property_
|
name, self._ParseType(property_type)))
additional = None
additional_properties = discovery_object_type.get('additionalProperties')
if additional_properties is not None:
additional = self._ParseType(additional_properties)
return properties, additional
def _ParseSchemas(self, discovery_schemas):
for _, discovery_schema in discovery_schemas.iteritems():
self._ParseSchema(discovery_schema)
def _ParseMethods(self, discovery_methods):
for method_name, discovery_method in discovery_methods.iteritems():
self._ParseMethod(method_name, discovery_method)
def _ParseParameter(self, parameter_name, parameter_type):
return mock_api_types.Parameter(
parameter_name, self._ParseType(parameter_type))
def _ParseParameters(self, discovery_method_parameters):
parameters = []
for parameter_name, parameter_type in (
discovery_method_parameters.iteritems()):
parameters.append(
self._ParseParameter(parameter_name, parameter_type))
parameters.sort(key=lambda parameter: parameter.name)
return parameters
def _ParseMethod(self, method_name, discovery_method):
parameters = self._ParseParameters(discovery_method.get('parameters', {}))
# Parse request type
discovery_method_request = discovery_method.get('request')
if discovery_method_request is None:
request_type = None
else:
request_type = self._ParseType(discovery_method_request)
# Parse response type.
discovery_method_response = discovery_method.get('response')
if discovery_method_response is None:
response_type = None
else:
response_type = self._ParseType(discovery_method_response)
return self._CreateMethod(
discovery_method.get('id'), method_name,
discovery_method.get('path', ''), parameters,
request_type, response_type)
def _ParseResources(self, discovery_resources):
for _, discovery_resource in discovery_resources.iteritems():
self._ParseResource(discovery_resource)
# Return all accumulated methods.
return self._parsed_methods
def _ParseResource(self, discovery_resource):
discovery_methods = discovery_resource.get('methods')
if discovery_methods:
self._ParseMethods(discovery_methods)
discovery_resources = discovery_resource.get('resources')
if discovery_resources:
self._ParseResources(discovery_resources)
def _ParseGlobals(self, discovery_document):
self._base_url = discovery_document.get('baseUrl')
self._common_parameters = self._ParseParameters(
discovery_document.get('parameters', {}))
def Parse(self):
self._ParseGlobals(self._discovery_document)
self._ParseSchemas(self._discovery_document.get('schemas'))
return self._ParseResources(self._discovery_document.get('resources'))
def _GetSchema(self, name):
schema = self._parsed_schemas.get(name)
if schema is None:
self._parsed_schemas[name] = schema = mock_api_types.ObjectType()
return schema
def _CreateSchema(self, name, properties, additional):
schema = self._GetSchema(name)
schema.Define(name, properties, additional)
return schema
def _CreateMethod(self, method_id, name, path, parameters, request, response):
if method_id in self._parsed_methods:
raise ValueError('Duplicate method {method}'.format(method=method_id))
all_parameters = dict((p.name, p) for p in self._common_parameters)
all_parameters.update(dict((p.name, p) for p in parameters))
path = self._base_url + path
method = mock_api_types.Method(
method_id, name, path, all_parameters, request, response)
self._parsed_methods[method_id] = method
return method
|
OCA/event
|
event_mail/models/__init__.py
|
Python
|
agpl-3.0
| 482
| 0
|
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import res_co
|
mpany
# WARNING: Order of imports matters on this module, so don't put res_company
# below the other modules since it will lead to a missing column error when
# the module is initialized for the first time since there are fields with
# default values wich refer to this new res.company field.
from . import event
from . import event_mail
from . import event_ty
|
pe
from . import res_config_settings
|
SalmonMode/contextional
|
contextional/test_resources/verbose_fixtures.py
|
Python
|
mit
| 7,418
| 0
|
from contextional import GCM
with GCM("A") as A:
@GCM.add_setup
def setUp():
pass
@GCM.add_teardown
def tearDown():
pass
with GCM.add_group("B"):
@GCM.add_setup
def setUp():
pass
@GCM.add_test("some test")
def test(case):
pass
@GCM.add_teardown
def tearDown():
pass
A.create_tests()
with GCM("A") as A:
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
with GCM.add_group("B"):
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_test("some test")
def test(case):
pass
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
A.create_tests()
with GCM("A") as A:
@GCM.add_setup("setup w/ description")
def setUp():
raise Exception
pass
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
with GCM.add_group("B"):
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_test("some test")
def test(case):
pass
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
A.create_tests()
with GCM("A") as A:
@GCM.add_setup
def setUp():
raise Exception
pass
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
with GCM.add_group("B"):
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_test("some test")
def test(case):
pass
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
A.create_tests()
with GCM("A") as A:
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
with GCM.add_group("B"):
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_test("some test")
def test(case):
pass
@GCM.add_teardown
def tearDown():
raise Exception()
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
A.create_tests()
with GCM("A") as A:
@GCM.add_setup("setup w/ description")
def setUp():
pass
with GCM.add_group("B"):
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_test("some test")
def test(case):
pass
@GCM.add_teardown
def tearDown():
raise Exception()
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
A.create_tests()
with GCM("A") as A:
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_teardown
def tearDown():
pass
with GCM.add_group("B"):
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_test("some test")
def test(case):
pass
@GCM.add_teardown
def tearDown():
raise Exception()
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
A.create_tests()
with GCM("A") as A:
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_teardown
def tearDown():
raise Exception()
pass
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
with GCM.add_group("B"):
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_test("some test")
def test(case):
pass
@GCM.add_teardown
def tearDown():
raise Exception()
@GCM.add_teardown
|
("teardown w/ description")
def tearD
|
own():
pass
A.create_tests()
with GCM("A") as A:
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
with GCM.add_group("B"):
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_test("some test")
def test(case):
pass
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
@GCM.add_teardown
def tearDown():
raise Exception()
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
A.create_tests()
with GCM("A") as A:
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
with GCM.add_group("B"):
@GCM.add_setup("setup w/ description")
def setUp():
pass
@GCM.add_test("some test")
def test(case):
pass
@GCM.add_teardown("teardown w/ description")
def tearDown():
raise Exception()
@GCM.add_teardown("teardown w/ description")
def tearDown():
pass
A.create_tests()
with GCM("A") as A:
@GCM.add_setup
def setUp():
pass
@GCM.add_teardown
def tearDown():
pass
with GCM.add_group("B"):
@GCM.add_setup
def setUp():
pass
@GCM.add_test("some test")
def test(case):
pass
@GCM.add_teardown
def tearDown():
pass
A.create_tests()
expected_stream_output = [
"A",
" B",
" some test ... ok",
"A",
" # setup w/ description ",
" B",
" # setup w/ description ",
" some test ... ok",
" # teardown w/ description ",
" # teardown w/ description ",
"A",
" # setup w/ description ERROR",
" B",
" some test ... FAIL",
" # teardown w/ description ",
"A",
" # setup (1/1) ERROR",
" B",
" some test ... FAIL",
" # teardown w/ description ",
"A",
" # setup w/ description ",
" B",
" # setup w/ description ",
" some test ... ok",
" # teardown (1/2) ERROR",
" # teardown w/ description ",
"A",
" # setup w/ description ",
" B",
" # setup w/ description ",
" some test ... ok",
" # teardown (1/2) ERROR",
"A",
" # setup w/ description ",
" B",
" # setup w/ description ",
" some test ... ok",
" # teardown (1/2) ERROR",
"A",
" # setup w/ description ",
" B",
" # setup w/ description ",
" some test ... ok",
" # teardown (1/2) ERROR",
" # teardown (1/2) ERROR",
"A",
" # setup w/ description ",
" B",
" # setup w/ description ",
" some test ... ok",
" # teardown w/ description ",
" # teardown (2/3) ERROR",
" # teardown w/ description ",
"A",
" # setup w/ description ",
" B",
" # setup w/ description ",
" some test ... ok",
" # teardown w/ description ERROR",
" # teardown w/ description ",
"A",
" B",
" some test ... ok",
]
|
zzeleznick/zDjango
|
Poller/urls.py
|
Python
|
mit
| 396
| 0.002525
|
from django.conf.urls import include, url
from django.contrib import admin
from Poller import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^(?P<pk>\d+)/$', views.DetailView.as_view(), name='detail'),
url(r'^(?P<pk>\d+)/results/$', views.ResultsView.as_view()
|
, name='results'),
url(r'^(?P<question_id>\d+)/vote/$', views.vote, name='vote'),
|
]
|
mainakibui/kobocat
|
onadata/apps/logger/management/commands/sync_deleted_instances_fix.py
|
Python
|
bsd-2-clause
| 1,551
| 0
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 fileencoding=utf-8
import json
from django.conf import settings
from django.core.management import BaseCommand
from django.utils import timezone
from django.utils.dateparse import parse_datetime
from django.utils.
|
translation import ugettext_lazy
from onadata.apps.logger.models import Instance
class Command(BaseCommand):
help = ugettext_lazy("Fixes deleted instances by syncing "
"deleted items from mongo.")
def handle(self, *args, **kwargs):
# Reset all sql deletes to None
Instance.ob
|
jects.exclude(
deleted_at=None, xform__downloadable=True).update(deleted_at=None)
# Get all mongo deletes
query = '{"$and": [{"_deleted_at": {"$exists": true}}, ' \
'{"_deleted_at": {"$ne": null}}]}'
query = json.loads(query)
xform_instances = settings.MONGO_DB.instances
cursor = xform_instances.find(query)
for record in cursor:
# update sql instance with deleted_at datetime from mongo
try:
i = Instance.objects.get(
uuid=record["_uuid"], xform__downloadable=True)
except Instance.DoesNotExist:
continue
else:
deleted_at = parse_datetime(record["_deleted_at"])
if not timezone.is_aware(deleted_at):
deleted_at = timezone.make_aware(
deleted_at, timezone.utc)
i.set_deleted(deleted_at)
|
hantek/BinaryConnect
|
cifar10.py
|
Python
|
gpl-2.0
| 9,943
| 0.023836
|
# Copyright 2015 Matthieu Courbariaux, Zhouhan Lin
"""
This file is adapted from BinaryConnect:
https://github.com/MatthieuCourbariaux/BinaryConnect
Running this script should reproduce the results trained on CIFAR10 shown in
the paper.
To train a vanilla ConvNet with ordinary backprop:
1. type "git checkout fullresolution" to switch to the "fullresolution" branch
2. execute "python cifar10.py"
To train a ConvNet with Binary Connect + quantized backprop:
1. type "git checkout binary" to switch to the "binary" branch
2. execute "python cifar10.py"
To train a ConvNet with Ternary Connect + quantized backprop:
1. type "git checkout ternary" to switch to the "ternary" branch
2. execute "python cifar10.py"
"""
import gzip
import cPickle
import numpy as np
import os
import os.path
import sys
import time
from trainer import Trainer
from model import Network
from layer import linear_layer, ReLU_layer, ReLU_conv_layer
from pylearn2.datasets.zca_dataset import ZCA_Dataset
from pylearn2.utils import serial
if __name__ == "__main__":
rng = np.random.RandomState(1234)
# data augmentation
zero_pad = 0
affine_transform_a = 0
affine_transform_b = 0
horizontal_flip = False
# batch
# keep a factor of 10000 if possible
# 10000 = (2*5)^4
batch_size = 100
number_of_batches_on_gpu = 45000/batch_size
BN = True
BN_epsilon=1e-4 # for numerical stability
BN_fast_eval= True
dropout_hidden = 1.
shuffle_examples = True
shuffle_batches = False
# Termination criteria
n_epoch = 300
monitor_step = 2
# LR
LR = .3
LR_fin = .001
LR_decay = (LR_fin/LR)**(1./n_epoch)
M= 0.
# BinaryConnect
BinaryConnect = True
stochastic = True
# Old hyperparameters
binary_training=False
stochastic_training=False
binary_test=False
stochastic_test=False
if BinaryConnect == True:
binary_training=True
if stochastic == True:
stochastic_training=True
else:
binary_test=True
print 'Loading the dataset'
preprocessor = serial.load("${PYLEARN2_DATA_PATH}/cifar10/pylearn2_gcn_whitened/preprocessor.pkl")
train_set = ZCA_Dataset(
preprocessed_dataset=serial.load("${PYLEARN2_DATA_PATH}/cifar10/pylear
|
n2_gcn_whitened/train.pkl"),
preprocessor = preprocessor,
start=0, stop = 45000)
valid_set = ZCA_Dataset(
preprocessed_dataset= serial.load("${PYLEARN2_DATA_PATH}/cifar10/pylearn2_gcn_whitened/train.pkl"),
preprocessor = preprocessor,
start=45000, stop = 50000)
test_s
|
et = ZCA_Dataset(
preprocessed_dataset= serial.load("${PYLEARN2_DATA_PATH}/cifar10/pylearn2_gcn_whitened/test.pkl"),
preprocessor = preprocessor)
# bc01 format
# print train_set.X.shape
train_set.X = train_set.X.reshape(45000,3,32,32)
valid_set.X = valid_set.X.reshape(5000,3,32,32)
test_set.X = test_set.X.reshape(10000,3,32,32)
# flatten targets
train_set.y = np.hstack(train_set.y)
valid_set.y = np.hstack(valid_set.y)
test_set.y = np.hstack(test_set.y)
# Onehot the targets
train_set.y = np.float32(np.eye(10)[train_set.y])
valid_set.y = np.float32(np.eye(10)[valid_set.y])
test_set.y = np.float32(np.eye(10)[test_set.y])
# for hinge loss
train_set.y = 2* train_set.y - 1.
valid_set.y = 2* valid_set.y - 1.
test_set.y = 2* test_set.y - 1.
print 'Creating the model'
class DeepCNN(Network):
def __init__(self, rng):
Network.__init__(self, n_hidden_layer = 8, BN = BN)
print " C3 layer:"
self.layer.append(ReLU_conv_layer(
rng,
filter_shape=(128, 3, 3, 3),
pool_shape=(1,1),
pool_stride=(1,1),
BN = BN,
BN_epsilon=BN_epsilon,
binary_training=binary_training,
stochastic_training=stochastic_training,
binary_test=binary_test,
stochastic_test=stochastic_test
))
print " C3 P2 layers:"
self.layer.append(ReLU_conv_layer(
rng,
filter_shape=(128, 128, 3, 3),
pool_shape=(2,2),
pool_stride=(2,2),
BN = BN,
BN_epsilon=BN_epsilon,
binary_training=binary_training,
stochastic_training=stochastic_training,
binary_test=binary_test,
stochastic_test=stochastic_test
))
print " C2 layer:"
self.layer.append(ReLU_conv_layer(
rng,
filter_shape=(256, 128, 2, 2),
pool_shape=(1,1),
pool_stride=(1,1),
BN = BN,
BN_epsilon=BN_epsilon,
binary_training=binary_training,
stochastic_training=stochastic_training,
binary_test=binary_test,
stochastic_test=stochastic_test
))
print " C2 P2 layers:"
self.layer.append(ReLU_conv_layer(
rng,
filter_shape=(256, 256, 2, 2),
pool_shape=(2,2),
pool_stride=(2,2),
BN = BN,
BN_epsilon=BN_epsilon,
binary_training=binary_training,
stochastic_training=stochastic_training,
binary_test=binary_test,
stochastic_test=stochastic_test
))
print " C2 layer:"
self.layer.append(ReLU_conv_layer(
rng,
filter_shape=(512, 256, 2, 2),
pool_shape=(1,1),
pool_stride=(1,1),
BN = BN,
BN_epsilon=BN_epsilon,
binary_training=binary_training,
stochastic_training=stochastic_training,
binary_test=binary_test,
stochastic_test=stochastic_test
))
print " C2 P2 layers:"
self.layer.append(ReLU_conv_layer(
rng,
filter_shape=(512, 512, 2, 2),
pool_shape=(2,2),
pool_stride=(2,2),
BN = BN,
BN_epsilon=BN_epsilon,
binary_training=binary_training,
stochastic_training=stochastic_training,
binary_test=binary_test,
stochastic_test=stochastic_test
))
print " C2 layer:"
self.layer.append(ReLU_conv_layer(
rng,
filter_shape=(1024, 512, 2, 2),
pool_shape=(1,1),
pool_stride=(1,1),
BN = BN,
BN_epsilon=BN_epsilon,
binary_training=binary_training,
stochastic_training=stochastic_training,
binary_test=binary_test,
stochastic_test=stochastic_test
))
print " FC layer:"
self.layer.append(ReLU_layer(
rng = rng,
n_inputs = 1024,
n_units = 1024,
BN = BN,
BN_epsilon=BN_epsilon,
dropout=dropout_hidden,
binary_training=binary_training,
|
wrobell/btzen
|
btzen/bluez.py
|
Python
|
gpl-3.0
| 1,239
| 0
|
#
# BTZen - library to asynchronously access Bluetooth devices.
#
# Copyright (C) 2015-2021 by Artur Wroblewski <wrobell@riseup.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your opti
|
on) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Bluetooth services implemented by BlueZ protocol stack.
"""
from .data import Make, ServiceType, Trigger, TriggerCondition
from .service import ServiceInterface, register_service
from .util import to_uuid
register_service(
Make.STANDARD,
ServiceType.BATTERY_LEVEL,
ServiceInterface(
to_uuid(0x180f),
'org.bluez.Battery1',
'Percentage',
'y'
),
trigger=Trigger(TriggerCondition.ON_CHANGE),
)
# vim: sw=4:et:ai
|
soltanmm-google/grpc
|
tools/run_tests/run_interop_tests.py
|
Python
|
bsd-3-clause
| 33,477
| 0.009111
|
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run interop (cross-language) tests in parallel."""
from __future__ import print_function
import argparse
import atexit
import itertools
import json
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import time
import uuid
import python_utils.dockerjob as dockerjob
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
# Docker doesn't clean up after itself, so we do it on exit.
atexit.register(lambda: subprocess.call(['stty', 'echo']))
ROOT = os.path.abspath(os.path.join(os.path.dirna
|
me(sys.argv[0]), '../..'))
os.c
|
hdir(ROOT)
_DEFAULT_SERVER_PORT=8080
_SKIP_CLIENT_COMPRESSION = ['client_compressed_unary',
'client_compressed_streaming']
_SKIP_SERVER_COMPRESSION = ['server_compressed_unary',
'server_compressed_streaming']
_SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION
_SKIP_ADVANCED = ['status_code_and_message',
'custom_metadata',
'unimplemented_method',
'unimplemented_service']
_TEST_TIMEOUT = 3*60
class CXXLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.safename = 'cxx'
def client_cmd(self, args):
return ['bins/opt/interop_client'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['bins/opt/interop_server'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return []
def unimplemented_test_cases_server(self):
return []
def __str__(self):
return 'c++'
class CSharpLanguage:
def __init__(self):
self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug'
self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug'
self.safename = str(self)
def client_cmd(self, args):
return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_SERVER_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'csharp'
class CSharpCoreCLRLanguage:
def __init__(self):
self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp1.0'
self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp1.0'
self.safename = str(self)
def client_cmd(self, args):
return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_SERVER_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'csharpcoreclr'
class JavaLanguage:
def __init__(self):
self.client_cwd = '../grpc-java'
self.server_cwd = '../grpc-java'
self.safename = str(self)
def client_cmd(self, args):
return ['./run-test-client.sh'] + args
def client_cmd_http2interop(self, args):
return ['./run-http2-client.sh'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['./run-test-server.sh'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'java'
class GoLanguage:
def __init__(self):
# TODO: this relies on running inside docker
self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
self.safename = str(self)
def client_cmd(self, args):
return ['go', 'run', 'client.go'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['go', 'run', 'server.go'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'go'
class Http2Server:
"""Represents the HTTP/2 Interop Test server
This pretends to be a language in order to be built and run, but really it
isn't.
"""
def __init__(self):
self.server_cwd = None
self.safename = str(self)
def server_cmd(self, args):
return ['python test/http2_test/http2_test_server.py']
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _TEST_CASES
def unimplemented_test_cases_server(self):
return _TEST_CASES
def __str__(self):
return 'http2'
class Http2Client:
"""Represents the HTTP/2 Interop Test
This pretends to be a language in order to be built and run, but really it
isn't.
"""
def __init__(self):
self.client_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['tools/http2_interop/http2_interop.test', '-test.v'] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _TEST_CASES
def unimplemented_test_cases_server(self):
return []
def __str__(self):
return 'http2'
class NodeLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['tools/run_tests/interop/with_nvm.sh',
'node', 'src/node/interop/interop_client.js'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['tools/run_tests/interop/with_nvm.sh',
'node', 'src/node/interop/interop_server.js'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'node'
class PHPLanguage:
def __init__(self):
self.client_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['src/php/bin/interop_client.sh'] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSI
|
gmist/ctm-5studio
|
main/auth/yahoo.py
|
Python
|
mit
| 2,148
| 0.008845
|
# coding: utf-8
from __future__ import absolute_import
import flask
import auth
import model
import util
from main import app
yahoo_config = dict(
access_token_url='https://api.login.yahoo.com/oauth/v2/get_token',
authorize_url='https://api.login.yahoo.com/oauth/v2/request_auth',
base_url='https://query.yahooapis.com/',
consumer_key=model.Config.get_master_db().yahoo_consumer_key,
consumer_secret=model.Config.get_master_db().yahoo_consumer_secret,
request_token_url='https://api.login.yahoo.com/oauth/v2/get_request_token',
)
yahoo = auth.create_oauth_app(yahoo_config, 'yahoo')
@app.route('/api/auth/callback/yah
|
oo/')
def yahoo_authorized():
response = yahoo.authorized_response()
if response is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(util.get_next_url())
flask.session['oauth_token'] = (
response['oauth_token'],
response['oauth_token_secret'],
)
fields = 'guid, emails, familyName, givenName, nickname'
me = yahoo.get(
'/v1/yql',
data={
'format': 'json',
'q': 'select %s from s
|
ocial.profile where guid = me;' % fields,
'realm': 'yahooapis.com',
},
)
user_db = retrieve_user_from_yahoo(me.data['query']['results']['profile'])
return auth.signin_user_db(user_db)
@yahoo.tokengetter
def get_yahoo_oauth_token():
return flask.session.get('oauth_token')
@app.route('/signin/yahoo/')
def signin_yahoo():
return auth.signin_oauth(yahoo)
def retrieve_user_from_yahoo(response):
auth_id = 'yahoo_%s' % response['guid']
user_db = model.User.get_by('auth_ids', auth_id)
if user_db:
return user_db
names = [response.get('givenName', ''), response.get('familyName', '')]
emails = response.get('emails', {})
if not isinstance(emails, list):
emails = [emails]
emails = [e for e in emails if 'handle' in e]
emails.sort(key=lambda e: e.get('primary', False))
email = emails[0]['handle'] if emails else ''
return auth.create_user_db(
auth_id=auth_id,
name=' '.join(names).strip() or response['nickname'],
username=response['nickname'],
email=email,
verified=bool(email),
)
|
widelands/widelands
|
cmake/codecheck/rules/assert0.py
|
Python
|
gpl-2.0
| 293
| 0
|
#!/usr/bin/env python -tt
# encoding: utf-8
#
"""Use a descriptive macro ins
|
tead of assert(fa
|
lse);"""
error_msg = 'Use NEVER_HERE() from base/macros.h here.'
regexp = r"""assert *\( *(0|false) *\)"""
forbidden = [
'assert(0)',
'assert(false)',
]
allowed = [
'NEVER_HERE()',
]
|
hperala/kontuwikibot
|
mwparserfromhell/string_mixin.py
|
Python
|
mit
| 4,073
| 0.000491
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software
|
"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copi
|
es of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module contains the :class:`.StringMixIn` type, which implements the
interface for the ``unicode`` type (``str`` on py3k) in a dynamic manner.
"""
from __future__ import unicode_literals
from sys import getdefaultencoding
from .compat import bytes, py26, py3k, str
__all__ = ["StringMixIn"]
def inheritdoc(method):
"""Set __doc__ of *method* to __doc__ of *method* in its parent class.
Since this is used on :class:`.StringMixIn`, the "parent class" used is
``str``. This function can be used as a decorator.
"""
method.__doc__ = getattr(str, method.__name__).__doc__
return method
class StringMixIn(object):
"""Implement the interface for ``unicode``/``str`` in a dynamic manner.
To use this class, inherit from it and override the :meth:`__unicode__`
method (same on py3k) to return the string representation of the object.
The various string methods will operate on the value of :meth:`__unicode__`
instead of the immutable ``self`` like the regular ``str`` type.
"""
if py3k:
def __str__(self):
return self.__unicode__()
def __bytes__(self):
return bytes(self.__unicode__(), getdefaultencoding())
else:
def __str__(self):
return bytes(self.__unicode__())
def __unicode__(self):
raise NotImplementedError()
def __repr__(self):
return repr(self.__unicode__())
def __lt__(self, other):
return self.__unicode__() < other
def __le__(self, other):
return self.__unicode__() <= other
def __eq__(self, other):
return self.__unicode__() == other
def __ne__(self, other):
return self.__unicode__() != other
def __gt__(self, other):
return self.__unicode__() > other
def __ge__(self, other):
return self.__unicode__() >= other
if py3k:
def __bool__(self):
return bool(self.__unicode__())
else:
def __nonzero__(self):
return bool(self.__unicode__())
def __len__(self):
return len(self.__unicode__())
def __iter__(self):
for char in self.__unicode__():
yield char
def __getitem__(self, key):
return self.__unicode__()[key]
def __reversed__(self):
return reversed(self.__unicode__())
def __contains__(self, item):
return str(item) in self.__unicode__()
def __getattr__(self, attr):
return getattr(self.__unicode__(), attr)
if py3k:
maketrans = str.maketrans # Static method can't rely on __getattr__
if py26:
@inheritdoc
def encode(self, encoding=None, errors=None):
if encoding is None:
encoding = getdefaultencoding()
if errors is not None:
return self.__unicode__().encode(encoding, errors)
return self.__unicode__().encode(encoding)
del inheritdoc
|
gEt-rIgHt-jR/voc
|
voc/python/types/python.py
|
Python
|
bsd-3-clause
| 8,387
| 0.000119
|
from ...java import opcodes as JavaOpcodes, Classref as JavaClassref
from . import java as Java
##########################################################################
# Python types and their operations
##########################################################################
class Callable:
class invoke:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'org/python/Callable',
'invoke',
args=['[Lorg/python/Object;', 'Ljava/util/Map;'],
returns='Lorg/python/Object;'
),
)
class Iterable:
class next:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'org/python/Iterable',
'__next__',
args=[],
returns='Lorg/python/Object;'
)
)
class Type:
class for_class:
def __init__(self, name):
self.name = name
def process(self, context):
context.add_opcodes(
JavaOpcodes.LDC_W(JavaClassref(self.name)),
JavaOpcodes.INVOKESTATIC(
'org/python/types/Type',
'pythonType',
args=['L
|
java/lang/Class;'],
returns='Lorg/python/types/Type;'
)
)
clas
|
s for_name:
def __init__(self, name):
self.name = name
def process(self, context):
context.add_opcodes(
JavaOpcodes.LDC_W(self.name),
JavaOpcodes.INVOKESTATIC(
'org/python/types/Type',
'pythonType',
args=['Ljava/lang/String;'],
returns='Lorg/python/types/Type;'
)
)
class to_python:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKESTATIC(
'org/python/types/Type',
'toPython',
args=['Ljava/lang/Object;'],
returns='Lorg/python/Object;'
)
)
class Object:
class get_attribute:
def __init__(self, attr, use_null=False):
self.attr = attr
self.use_null = use_null
def process(self, context):
context.add_opcodes(
JavaOpcodes.LDC_W(self.attr),
)
if self.use_null:
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'org/python/Object',
'__getattribute_null',
args=['Ljava/lang/String;'],
returns='Lorg/python/Object;'
),
)
else:
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'org/python/Object',
'__getattribute__',
args=['Ljava/lang/String;'],
returns='Lorg/python/Object;'
),
)
class set_attr:
def __init__(self, attr):
self.attr = attr
def process(self, context):
context.add_opcodes(
JavaOpcodes.LDC_W(self.attr),
JavaOpcodes.SWAP(),
JavaOpcodes.INVOKEINTERFACE(
'org/python/Object',
'__setattr__',
args=['Ljava/lang/String;', 'Lorg/python/Object;'],
returns='V'
),
)
class del_attr:
def __init__(self, attr=None):
self.attr = attr
def process(self, context):
if self.attr:
context.add_opcodes(
JavaOpcodes.LDC_W(self.attr),
)
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'org/python/Object',
'__delattr__',
args=['Ljava/lang/String;'],
returns='V'
),
)
class get_item:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'org/python/Object',
'__getitem__',
args=['Lorg/python/Object;'],
returns='Lorg/python/Object;'
),
)
class set_item:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'org/python/Object',
'__setitem__',
args=['Lorg/python/Object;', 'Lorg/python/Object;'],
returns='V'
),
)
class del_item:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'org/python/Object',
'__delitem__',
args=['Lorg/python/Object;'],
returns='V'
),
)
class iter:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'org/python/Object',
'__iter__',
args=[],
returns='Lorg/python/Iterable;'
)
)
class as_boolean:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEINTERFACE(
'org/python/Object',
'__bool__',
args=[],
returns='Lorg/python/Object;'
),
JavaOpcodes.CHECKCAST('org/python/types/Bool'),
JavaOpcodes.GETFIELD('org/python/types/Bool', 'value', 'Z'),
)
class NONE:
def process(self, context):
context.add_opcodes(
JavaOpcodes.GETSTATIC('org/python/types/NoneType', 'NONE', 'Lorg/python/Object;')
)
class Dict:
def process(self, context):
context.add_opcodes(
Java.New('org/python/types/Dict'),
Java.Init('org/python/types/Dict')
)
class set_item:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEVIRTUAL(
'org/python/types/Dict',
'__setitem__',
args=['Lorg/python/Object;', 'Lorg/python/Object;'],
returns='V'
)
)
class Set:
def process(self, context):
context.add_opcodes(
Java.New('org/python/types/Set'),
Java.Init('org/python/types/Set')
)
class add:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEVIRTUAL(
'org/python/types/Set',
'add',
args=['Lorg/python/Object;'],
returns='Lorg/python/Object;'
),
JavaOpcodes.POP()
)
class List:
def process(self, context):
context.add_opcodes(
Java.New('org/python/types/List'),
Java.Init('org/python/types/List')
)
class append:
def process(self, context):
context.add_opcodes(
JavaOpcodes.INVOKEVIRTUAL(
'org/python/types/List',
'append',
args=['Lorg/python/Object;'],
returns='Lorg/python/Object;'
),
JavaOpcodes.POP()
)
class Str:
def __init__(self, value=None):
self.value = value
def process(self, context):
if self.value:
context.add_opcodes(
Java.New('org/python/types/Str'),
JavaOpcodes.LDC_W(self.value),
Java.Init('org/python/ty
|
chatcannon/scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_B.py
|
Python
|
bsd-3-clause
| 21,639
| 0.000185
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, cos, exp, log, arange, pi, roll, sin, sqrt, sum
from .go_benchmark import Benchmark
class BartelsConn(Benchmark):
r"""
Bartels-Conn objective function.
The BartelsConn [1]_ global optimization problem is a multimodal
minimization problem defined as follows:
.. math::
f_{\text{BartelsConn}}(x) = \lvert {x_1^2 + x_2^2 + x_1x_2} \rvert +
\lvert {\sin(x_1)} \rvert + \lvert {\cos(x_2)} \rvert
w
|
ith :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 1` for :math:`x = [0, 0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
|
self._bounds = zip([-500.] * self.N, [500.] * self.N)
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 1.0
def fun(self, x, *args):
self.nfev += 1
return (abs(x[0] ** 2.0 + x[1] ** 2.0 + x[0] * x[1]) + abs(sin(x[0]))
+ abs(cos(x[1])))
class Beale(Benchmark):
r"""
Beale objective function.
The Beale [1]_ global optimization problem is a multimodal
minimization problem defined as follows:
.. math::
f_{\text{Beale}}(x) = \left(x_1 x_2 - x_1 + 1.5\right)^{2} +
\left(x_1 x_2^{2} - x_1 + 2.25\right)^{2} + \left(x_1 x_2^{3} - x_1 +
2.625\right)^{2}
with :math:`x_i \in [-4.5, 4.5]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x=[3, 0.5]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([-4.5] * self.N, [4.5] * self.N)
self.global_optimum = [[3.0, 0.5]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return ((1.5 - x[0] + x[0] * x[1]) ** 2
+ (2.25 - x[0] + x[0] * x[1] ** 2) ** 2
+ (2.625 - x[0] + x[0] * x[1] ** 3) ** 2)
class BiggsExp02(Benchmark):
r"""
BiggsExp02 objective function.
The BiggsExp02 [1]_ global optimization problem is a multimodal minimization
problem defined as follows
.. math::
\begin{matrix}
f_{\text{BiggsExp02}}(x) = \sum_{i=1}^{10} (e^{-t_i x_1}
- 5 e^{-t_i x_2} - y_i)^2 \\
t_i = 0.1 i\\
y_i = e^{-t_i} - 5 e^{-10t_i}\\
\end{matrix}
with :math:`x_i \in [0, 20]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = zip([0] * 2,
[20] * 2)
self.global_optimum = [[1., 10.]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
t = arange(1, 11.) * 0.1
y = exp(-t) - 5 * exp(-10 * t)
vec = (exp(-t * x[0]) - 5 * exp(-t * x[1]) - y) ** 2
return sum(vec)
class BiggsExp03(Benchmark):
r"""
BiggsExp03 objective function.
The BiggsExp03 [1]_ global optimization problem is a multimodal minimization
problem defined as follows
.. math::
\begin{matrix}\ f_{\text{BiggsExp03}}(x) = \sum_{i=1}^{10}
(e^{-t_i x_1} - x_3e^{-t_i x_2} - y_i)^2\\
t_i = 0.1i\\
y_i = e^{-t_i} - 5e^{-10 t_i}\\
\end{matrix}
with :math:`x_i \in [0, 20]` for :math:`i = 1, 2, 3`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10, 5]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self._bounds = zip([0] * 3,
[20] * 3)
self.global_optimum = [[1., 10., 5.]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
t = arange(1., 11.) * 0.1
y = exp(-t) - 5 * exp(-10 * t)
vec = (exp(-t * x[0]) - x[2] * exp(-t * x[1]) - y) ** 2
return sum(vec)
class BiggsExp04(Benchmark):
r"""
BiggsExp04 objective function.
The BiggsExp04 [1]_ global optimization problem is a multimodal
minimization problem defined as follows
.. math::
\begin{matrix}\ f_{\text{BiggsExp04}}(x) = \sum_{i=1}^{10}
(x_3 e^{-t_i x_1} - x_4 e^{-t_i x_2} - y_i)^2\\
t_i = 0.1i\\
y_i = e^{-t_i} - 5 e^{-10 t_i}\\
\end{matrix}
with :math:`x_i \in [0, 20]` for :math:`i = 1, ..., 4`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10, 1, 5]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self._bounds = zip([0.] * 4,
[20.] * 4)
self.global_optimum = [[1., 10., 1., 5.]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
t = arange(1, 11.) * 0.1
y = exp(-t) - 5 * exp(-10 * t)
vec = (x[2] * exp(-t * x[0]) - x[3] * exp(-t * x[1]) - y) ** 2
return sum(vec)
class BiggsExp05(Benchmark):
r"""
BiggsExp05 objective function.
The BiggsExp05 [1]_ global optimization problem is a multimodal minimization
problem defined as follows
.. math::
\begin{matrix}\ f_{\text{BiggsExp05}}(x) = \sum_{i=1}^{11}
(x_3 e^{-t_i x_1} - x_4 e^{-t_i x_2} + 3 e^{-t_i x_5} - y_i)^2\\
t_i = 0.1i\\
y_i = e^{-t_i} - 5e^{-10 t_i} + 3e^{-4 t_i}\\
\end{matrix}
with :math:`x_i \in [0, 20]` for :math:`i=1, ..., 5`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10, 1, 5, 4]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=5):
Benchmark.__init__(self, dimensions)
self._bounds = zip([0.] * 5,
[20.] * 5)
self.global_optimum = [[1., 10., 1., 5., 4.]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
t = arange(1, 12.) * 0.1
y = exp(-t) - 5 * exp(-10 * t) + 3 * exp(-4 * t)
vec = (x[2] * exp(-t * x[0]) - x[3] * exp(-t * x[1])
+ 3 * exp(-t * x[4]) - y) ** 2
return sum(vec)
class Bird(Benchmark):
r"""
Bird objective function.
The Bird global optimization problem is a multimodal minimization
problem defined as follows
.. math::
f_{\text{Bird}}(x) = \left(x_1 - x_2\right)^{2} + e^{\left[1 -
\sin\left(x_1\right) \right]^{2}} \cos\left(x_2\right) + e^{\left[1 -
\cos\left(x_2\right)\right]^{2}} \sin\left(x_1\right)
with :math:`x_i \in [-2\pi, 2\pi]`
*Global optimum*: :math:`f(x) = -106.7645367198034` for :math:`x
= [4.701055751981055, 3.152946019601391]` or :math:`x =
[-1.582142172055011, -3.130246799635430]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark
|
apanda/NetBricks
|
scripts/tuning/read_cpu_dma_latency.py
|
Python
|
isc
| 588
| 0.022109
|
#!/usr/bin/python
import os
import signal
import struct
import sys
import time
A
|
LLOWED_INTERFACES = [ "cpu_dma_latency", "network_latency", "network_throughput" ]
def read_pmqos(name):
filename = "/dev/%s" % name
old = open(filename)
old_value = struct.unpack("i", old.read())[0]
print "PMQOS value for %s is %d"%(name, old_value)
if __name__=="__main__":
if len(sys.argv) < 2:
print "Must specify what to read"
sys.exit(1)
read = sys.argv[1]
if read not in
|
ALLOWED_INTERFACES:
print "Cannot read %s"%read
sys.exit(1)
read_pmqos(read)
|
yfede/gimp-plugin-export-layers
|
export_layers/pylibgimpplugin/tests/test_settings.py
|
Python
|
gpl-3.0
| 23,801
| 0.013361
|
#-------------------------------------------------------------------------------
#
# This file is part of pylibgimpplugin.
#
# Copyright (C) 2014 khalim19 <khalim19@gmail.com>
#
# pylibgimpplugin is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pylibgimpplugin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pylibgimpplugin. If not, see <http://www.gnu.org/licenses/>.
#
#-------------------------------------------------------------------------------
#===============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
str = unicode
#===============================================================================
import errno
from StringIO import StringIO
import json
import unittest
import gimpenums
from ..lib import mock
from . import gimpmocks
from .. import settings
from .. import libfiles
#===============================================================================
LIB_NAME = '.'.join(__name__.split('.')[:-2])
#===============================================================================
class MockStringIO(StringIO):
def read(self):
return self.getvalue()
class MockGuiWidget(object):
def __init__(self, value):
self.value = value
self.enabled = True
self.visible = True
class MockSettingPresenter(settings.SettingPresenter):
@property
def value(self):
return self._element.value
@value.setter
def value(self, val):
self._element.value = val
@property
def enabled(self):
return self._element.enabled
@enabled.setter
def enabled(self, val):
self._element.enabled = val
@property
def visible(self):
return self._element.visible
@visible.setter
def visible(self, val):
self._element.visible = val
def connect_event(self, event_func, *event_args):
pass
def set_tooltip(self):
pass
class MockSettingPresenterContainer(settings.SettingPresenterContainer):
def _gui_on_element_value_change(self, presenter):
self._on_element_value_change(presenter)
def _gui_on_element_value_change_streamline(self, presenter):
self._on_element_value_change(presenter)
class SettingContainerTest(settings.SettingContainer):
def _create_settings(self):
self._add(settings.StringSetting('file_extension', ""))
self._add(settings.BoolSetting('ignore_invisible', False))
self._add(
settings.EnumSetting(
'overwrite_mode', 'rename_new',
[('replace', "Replace"),
('skip', "Skip"),
('rename_new', "Rename new file"),
('rename_existing', "Rename existing file")])
)
self['file_extension'].set_streamline_func(streamline_file_extension, self['ignore_invisible'])
self['overwrite_mode'].set_streamline_func(streamline_overwrite_mode, self['ignore_invisible'], self['file_extension'])
def streamline_file_extension(file_extension, ignore_invisible):
if ignore_invisible.value:
file_extension.value = "png"
file_extension.ui_enabled = False
else:
file_extension.value = "jpg"
file_extension.ui_enabled = True
def streamline_overwrite_mode(overwrite_mode, ignore_invisible, file_extension):
if ignore_invisible.value:
overwrite_mode.value = overwrite_mode.options['skip']
file_extension.error_messages['custom'] = "custom error message"
else:
overwrite_mode.value = overwrite_mode.options['replace']
file_extension.error_messages['custom'] = "different custom error message"
#===============================================================================
class TestSetting(unittest.TestCase):
def setUp(self):
self.setting = settings.Setting('file_extension
|
', "")
def test_changed_attributes(self):
for attr, val in [('value', "png"), ('ui_enabled', False), ('ui_visible', True)]:
setattr(self.setting,
|
attr, val)
for attr in ['value', 'ui_enabled', 'ui_visible']:
self.assertTrue(attr in self.setting.changed_attributes,
msg=("'" + attr + "' not in " + str(self.setting.changed_attributes)))
def test_can_be_registered_to_pdb(self):
self.setting.gimp_pdb_type = gimpenums.PDB_INT32
self.assertEqual(self.setting.can_be_registered_to_pdb, True)
self.setting.gimp_pdb_type = None
self.assertEqual(self.setting.can_be_registered_to_pdb, False)
with self.assertRaises(ValueError):
self.setting.gimp_pdb_type = None
self.setting.can_be_registered_to_pdb = True
def test_reset(self):
setting = settings.Setting('file_extension', "")
setting.value = "png"
setting.reset()
self.assertEqual(setting.value, "")
def test_set_remove_streamline_func(self):
with self.assertRaises(TypeError):
self.setting.remove_streamline_func()
with self.assertRaises(TypeError):
self.setting.set_streamline_func(None)
with self.assertRaises(TypeError):
self.setting.set_streamline_func("this is not a function")
def test_invalid_streamline(self):
with self.assertRaises(TypeError):
self.setting.streamline()
def test_can_streamline(self):
self.setting.set_streamline_func(streamline_file_extension)
self.assertTrue(self.setting.can_streamline)
self.setting.remove_streamline_func()
self.assertFalse(self.setting.can_streamline)
def test_streamline(self):
ignore_invisible = settings.BoolSetting('ignore_invisible', False)
self.setting.value = "gif"
self.setting.set_streamline_func(streamline_file_extension, ignore_invisible)
changed_settings = self.setting.streamline()
self.assertTrue(self.setting in changed_settings)
self.assertTrue('ui_enabled' in changed_settings[self.setting])
self.assertTrue('value' in changed_settings[self.setting])
self.assertEqual(self.setting.ui_enabled, True)
self.assertEqual(self.setting.value, "jpg")
def test_streamline_force(self):
ignore_invisible = settings.BoolSetting('ignore_invisible', False)
self.setting.set_streamline_func(streamline_file_extension, ignore_invisible)
changed_settings = self.setting.streamline()
self.assertEqual({}, changed_settings)
changed_settings = self.setting.streamline(force=True)
self.assertTrue(self.setting in changed_settings)
class TestIntSetting(unittest.TestCase):
def setUp(self):
self.setting = settings.IntSetting('count', 0)
self.setting.min_value = 0
self.setting.max_value = 100
def test_below_min(self):
with self.assertRaises(settings.SettingValueError):
self.setting.value = -5
def test_above_max(self):
with self.assertRaises(settings.SettingValueError):
self.setting.value = 200
class TestFloatSetting(unittest.TestCase):
def setUp(self):
self.setting = settings.FloatSetting('clip_percent', 0.0)
self.setting.min_value = 0.0
self.setting.max_value = 100.0
def test_below_min(self):
with self.assertRaises(settings.SettingValueError):
self.setting.value = -5.0
try:
self.setting.value = 0.0
except settings.SettingValueError:
self.fail("`SettingValueError` should not be raised")
def test_above_max(self):
with self.assertRaises(settings.SettingValueError):
self.setting.value = 200.0
try:
self.setting.value = 100.0
except settings.SettingValueError:
self.fail("`SettingValueError` should not be raised")
class TestEnumSetting(unittest.TestCase):
def setUp(self):
self.setting_display_name = "Overwrite mode (non-interactive only)"
self.setting = settings.EnumSetting(
'overwrite_
|
xaratustrah/iq_suite
|
iqtools/tools.py
|
Python
|
gpl-2.0
| 10,337
| 0.000871
|
"""
Collection of tools for the IQTools library
Xaratustrah
2017
"""
import os
import logging as log
from scipy.signal import hilbert
from scipy.io import wavfile
import xml.etree.ElementTree as et
import numpy as np
import types
import uproot3
import uproot3_methods.classes.TH1
from iqtools.iqbase import IQBase
from iqtools.tcapdata import TCAPData
from iqtools.tdmsdata import TDMSData
from iqtools.bindata import BINData
from iqtools.iqtdata import IQTData
from iqtools.tiqdata import TIQData
from iqtools.csvdata import CSVData
from iqtools.wavdata import WAVData
from iqtools.xdatdata import XDATData
# ------------ TOOLS ----------------------------
def get_iq_object(filename, header_filename=None):
"""
Return suitable object accorting to extension.
Parameters
----------
filename
Returns
-------
"""
# Object generation
_, file_extension = os.path.splitext(filename)
iq_data = None
if file_extension.lower() == '.txt' or file_extension.lower() == '.csv':
log.info('This is an ASCII file.')
iq_data = CSVData(filename)
if file_extension.lower() == '.bin':
log.info('This is a raw binary file.')
iq_data = BINData(filename)
if file_extension.lower() == '.wav':
log.info('This is a wav file.')
iq_data = WAVData(filename)
if file_extension.lower() == '.iqt':
log.info('This is an iqt file.')
iq_data = IQTData(filename)
if file_extension.lower() == '.iq':
log.info('This is an iq file.')
iq_data = IQTData(filename)
if file_extension.lower() == '.tiq':
log.info('This is a tiq file.')
iq_data = TIQData(filename)
if file_extension.lower() == '.tdms':
log.info('This is a TDMS file.')
iq_data = TDMSData(filename)
if file_extension.lower() == '.dat':
log.info('This is a TCAP file.')
if not header_filename:
log.info('TCAP files need a text header file as well. Aborting....')
return None
else:
iq_data = TCAPData(filename, header_filename)
if file_extension.lower() == '.xdat':
log.info('This is a XDAT file.')
if not header_filename:
log.info('XDAT files need a text header file as well. Aborting....')
return None
else:
iq_data = XDATData(filename, header_filename)
return iq_data
def get_eng_notation(value, unit='', decimal_place=2):
"""
Convert numbers to scientific notation
Parameters
----------
value input number float or integer
decimal_place How many decimal places should be left
unit The unit will be shown, otherwise powers of ten
Returns
-------
"""
ref = {24: 'Y', 21: 'Z', 18: 'E', 15: 'P',
12: 'T', 9: 'G', 6: 'M', 3: 'k', 0: '',
-3: 'm', -6: 'u', -9: 'n', -12: 'p',
-15: 'f', -18: 'a', -21: 'z', -24: 'y',
}
if value == 0:
return '{}{}'.format(0, unit)
flag = '-' if value < 0 else ''
num = max([key for key in ref.keys() if abs(value) >= 10 ** key])
if num == 0:
mult = ''
else:
mult = ref[num] if unit else 'e{}'.format(num)
return '{}{}{}{}'.format(flag, int(abs(value) / 10 ** num * 10 ** decimal_place) / 10 ** decimal_place, mult,
unit)
def get_cplx_spectrogram(x, nframes, lframes):
sig = np.reshape(x, (nframes, lframes))
zz = np.fft.fft(sig, axis=1)
return zz
def get_inv_cplx_spectrogram(zz, nframes, lframes):
inv_zz = np.fft.ifft(zz, axis=1)
inv_zz = np.reshape(inv_zz, (1, nframes * lframes))[0]
return inv_zz
def get_root_th2d(xx, yy, zz, name='', title=''):
from ROOT import TH2D
h = TH2D(name, title, np.shape(xx)[
1], xx[0, 0], xx[0, -1], np.shape(yy)[0], yy[0, 0], yy[-1, 0])
for j in range(np.shape(yy)[0]):
for i in range(np.shape(xx)[1]):
h.SetBinContent(i, j, zz[j, i])
return h
def make_test_signal(f, fs, length=1, nharm=0, noise=False):
"""Make a sine signal with/without noise."""
t = np.arange(0, length, 1 / fs)
x = np.zeros(len(t))
for i in range(nharm + 2):
x += np.sin(2 * np.pi * i * f * t)
if noise:
x += np.random.normal(0, 1, len(t))
return t, x
def shift_phase(x, phase):
"""
Shift phase in frequency domain
x: complex or analytical signal
phase: amount in radians
returns: shifted complex signal
"""
XX = np.fft.fft(x)
angle = np.unwrap(np.angle(XX)) + phase
YY = np.abs(XX) * np.exp(1j * angle)
return np.fft.ifft(YY)
def write_signal_to_bin(cx, filename, fs=1, center=0, write_header=True):
"""
filename: name of the output filename
x: data vec
|
tor to write to filename
fs: sampling Frequency
center: center Frequency
write_header: if set to true, then the first 4 bytes of the file are 32-bit
sampling Frequency and then follows the center frequency also in 32-bit. the
Data follows afterwards in I, Q format each 32-bit as well.
"""
# 32-bit little endian fl
|
oats
# insert header
if write_header:
cx = np.insert(cx, 0, complex(fs, center))
cx = cx.astype(np.complex64)
cx.tofile(filename + '.bin')
def write_signal_to_csv(filename, cx, fs=1, center=0):
# insert ascii header which looks like a complex number
cx = np.insert(cx, 0, complex(fs, center))
with open(filename + '.csv', 'w') as f:
for i in range(len(cx)):
f.write('{}|{}\n'.format(
np.real(cx[i]), np.imag(cx[i])))
def write_signal_to_wav(filename, cx, fs=1):
""" Save the singal as an audio wave """
wavfile.write(filename + '.wav', fs,
abs(cx) / max(abs(cx)))
def make_analytical(x):
"""Make an analytical signal from the real signal"""
yy = hilbert(x)
ii = np.real(yy)
qq = np.imag(yy)
x_bar = np.vectorize(complex)(ii, qq)
ins_ph = np.angle(x_bar) * 180 / np.pi
return x_bar, ins_ph
def read_result_csv(filename):
"""
Read special format CSV result file from RSA5000 series output
:param filename:
:return:
"""
p = np.genfromtxt(filename, skip_header=63)
with open(filename) as f:
cont = f.readlines()
for l in cont:
l = l.split(',')
if 'Frequency' in l and len(l) == 3:
center = float(l[1])
if 'XStart' in l and len(l) == 3:
start = float(l[1])
if 'XStop' in l and len(l) == 3:
stop = float(l[1])
f = np.linspace(start - center, stop - center, len(p))
return f, p
def read_specan_xml(filename):
"""
Read the resulting saved trace file Specan from the Tektronix RSA5000 series
these files are produced while saving traces.
:param filename:
:return:
"""
with open(filename, 'rb') as f:
ba = f.read()
xml_tree_root = et.fromstring(ba)
for elem in xml_tree_root.iter(tag='Count'):
count = int(elem.text)
for elem in xml_tree_root.iter(tag='XStart'):
start = float(elem.text)
for elem in xml_tree_root.iter(tag='XStop'):
stop = float(elem.text)
for elem in xml_tree_root.iter(tag='XUnits'):
xunits = elem.text
for elem in xml_tree_root.iter(tag='YUnits'):
yunits = elem.text
for elem in xml_tree_root.iter(tag='y'):
pwr = float(elem.text)
p = np.zeros(count)
i = 0
for elem in xml_tree_root.iter(tag='y'):
p[i] = float(elem.text)
i += 1
f = np.linspace(start, stop, count)
return f, p, (xunits, yunits)
def read_data_csv(filename):
"""
Read special format CSV data file from RSA5100 series output.
Please note that 50 ohm power termination is already considered
for these data.
:param filename:
:return:
"""
data = np.genfromtxt(filename, skip_header=10, delimiter=",")
# has one dimension more, should use ravel
data = np.ravel(data).view(dtype='c16')
return data
def parse_filename(filename):
"""
Parses filenames of experimental data in the following format:
58Ni26+_374MeVu
|
amolkahat/pandas
|
pandas/core/groupby/generic.py
|
Python
|
bsd-3-clause
| 58,252
| 0.000223
|
"""
Define the SeriesGroupBy, DataFrameGroupBy, and PanelGroupBy
classes that hold the groupby interfaces (and some implementations).
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
import collections
import copy
import warnings
from functools import partial
from textwrap import dedent
import numpy as np
import pandas.core.algorithms as algorithms
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas import compat
from pandas._libs import Timestamp, lib
from pandas.compat import lzip, map
from pandas.compat.numpy import _np_version_under1p13
from pandas.core.arrays import Categorical
from pandas.core.base import DataError, SpecificationError
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
ensure_int64, ensure_platform_int, is_bool, is_datetimelike,
is_integer_dtype, is_interval_dtype, is_numeric_dtype, is_scalar
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy, _apply_docs, _transform_template
)
from pandas.core.index import CategoricalIndex, Index, MultiIndex
from pandas.core.internals import BlockManager, make_block
from pandas.core.panel import Panel
from pandas.core.series import Series
from pandas.plotting._core import boxplot_frame_groupby
from pandas.util._decorators import Appender, Substitution
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, alt=None, numeric_only=True,
min_count=-1):
new_items, new_blocks = self._cython_agg_blocks(
how, alt=alt, numeric_only=numeric_only, min_count=min_count)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, alt=None, numeric_only=True,
min_count=-1):
# TODO: the actual managing of mgr_locs is a PITA
# here, it should happen via BlockManager.combine
data, agg_axis = self._get_data_to_aggregate()
if numeric_only:
data = data.get_numeric_data(copy=False)
new_blocks = []
new_items = []
deleted_items = []
for block in data.blocks:
locs = block.mgr_locs.as_array
try:
result, _ = self.grouper.aggregate(
block.values, how, axis=agg_axis, min_count=min_count)
except NotImplementedError:
# generally if we have numeric_only=False
# and non-applicable functions
# try to python agg
if alt is None:
# we cannot perform the operation
# in an alternate way, exclude the block
deleted_items.append(locs)
continue
# call our grouper again with only this block
from pandas.core.groupby.groupby import groupby
obj = self.obj[data.items[locs]]
s = groupby(obj, self.grouper)
result = s.aggregate(lambda x: alt(x, axis=self.axis))
finally:
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = block.make_block(result)
new_items.append(locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
# reset the locs in the blocks to correspond to our
# current ordering
indexer = np.concatenate(new_items)
new_items = data.items.take(np.sort(indexer))
if len(deleted_items):
# we need to adjust the indexer to account for the
# items we have removed
# really should be done in internals :<
deleted = np.concatenate(deleted_items)
ai = np.arange(len(data))
mask = np.zeros(len(data))
mask[deleted] = 1
indexer = (ai - mask.cumsum())[indexer]
offset = 0
for b in new_blocks:
loc = len(b.mgr_locs)
b.mgr_locs = indexer[offset:(offset + loc)]
offset += loc
return new_items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
def aggregate(self, arg, *args, **kwargs):
_level = kwargs.pop('_level', None)
result, how = self._aggregate(arg, _level=_level, *args, **kwargs)
if how is None:
return result
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs(
[arg], _level=_level, _axis=self.axis)
result.columns = Index(
result.columns.levels[0],
name=self._selected_obj.columns.name)
except Exception:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
return result._convert(datetime=True)
agg = aggregate
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if
|
axis != obj._info_axis_number:
try:
for name, data in self:
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name]
|
= self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated
|
harmsm/thefarm
|
thefarm/farm.py
|
Python
|
mit
| 853
| 0.009379
|
import json, logging
class Farm:
|
"""
Main class holding the who
|
le farm.
"""
def __init__(self,json_file):
"""
"""
# load in json file
data = json.loads(open(json_file,'r').read())
# Make sure required data is in the json file
required_attr_list = ["latitude","longitude"]
for a in required_attr_list:
if a not in data.keys():
err = "Json file does not have all required data. Missing {}\n".format(a)
raise ValueError(err)
# parse resulting json
for k in data.keys():
self.__setattr__("_{}".format(k),data[k])
# Get the utc offset for our current time
self._utc_offset = datetime.now() - datetime.utcnow()
# get the current sunrise, sunset etc.
self._get_suntimes()
|
hidat/audio_pipeline
|
audio_pipeline/test/TagFormatTest.py
|
Python
|
mit
| 12,551
| 0.006454
|
import os
import unittest
import mutagen
import shutil
import audio_pipeline.test.References as ref
from . import TestUtil
from ..util import format
vorbis_files = dict(t1=os.path.join(ref.format_testing_audio, "t1.flac"),
picard=os.path.join(ref.format_testing_audio, "picard.flac"),
unknown=os.path.join(ref.format_testing_audio, "unknown.flac"),
to_write=os.path.join(ref.write_testing_audio, "unknown.flac"),
copy_to=os.path.join(ref.write_testing_audio, "unknown_copy.flac"))
aac_files = dict(t1=os.path.join(ref.format_testing_audio, "t1.m4a"),
picard=os.path.join(ref.format_testing_audio, "picard.m4a"),
unknown=os.path.join(ref.format_testing_audio, "unknown.m4a"),
to_write=os.path.join(ref.write_testing_audio, "unknown.m4a"),
copy_to=os.path.join(ref.write_testing_audio, "unknown_copy.m4a"))
id3_files = dict(t1=os.path.join(ref.format_testing_audio, "t1.mp3"),
picard=os.path.join(ref.format_testing_audio, "picard.mp3"),
unknown=os.path.join(ref.format_testing_audio, "unknown.mp3"),
to_write=os.path.join(ref.write_testing_audio, "unknown.mp3"),
copy_to=os.path.join(ref.write_testing_audio, "unknown_copy.mp3"))
class TestReadGenericTags(TestUtil.TestUtilMixin):
def test_artist_name(self):
self.check_tag(self.format.album_artist, self.tags.get("albumartist"))
def test_mbid(self):
self.check_tag(self.format.mbid, self.tags.get("mbid"))
def test_album(self):
self.check_tag(self.format.album, self.tags.get("album"))
def test_release_date(self):
self.check_tag(self.format.release_date, self.tags.get("date"))
def test_title(self):
self.check_tag(self.format.title, self.tags.get("title"))
def test_artist(self):
self.check_tag(self.format.artist, self.tags.get("artist"))
def test_disc_num(self):
self.check_tag(self.format.disc_num, self.tags.get("discnumber"))
def test_track_num(self):
self.check_tag(self.format.track_num, self.tags.get("tracknumber"))
def test_length(self):
self.check_tag(self.format.length, self.tags.get("length"))
#################
# test tag equality
#################
class TestTagEquality(TestUtil.TestUtilMixin, unittest.TestCase):
vorbis = format.Vorbis.Format
id3 = format.ID3.Format
aac = format.AAC.Format
vorbis_t1 = mutagen.File(vorbis_files["t1"])
vorbis_picard = mutagen.File(vorbis_files["picard"])
vorbis_unknown = mutagen.File(vorbis_files["unknown"])
aac_t1 = mutagen.File(aac_files["t1"])
aac_picard = mutagen.File(aac_files["picard"])
aac_unknown = mutagen.File(aac_files["unknown"])
id3_t1 = mutagen.File(id3_files["t1"])
id3_picard = mutagen.File(id3_files["picard"])
id3_unknown = mutagen.File(id3_files["unknown"])
def test_artist_name(self):
vorbis_tag_type = self.vorbis.album_artist
aac_tag_type = self.aac.album_artist
id3_tag_type = self.id3.album_artist
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_mbid(self):
vorbis_tag_type = self.vorbis.mbid
aac_tag_type = self.aac.mbid
id3_tag_type = self.id3.mbid
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_album(self):
vorbis_tag_type = self.vorbis.album
aac_tag_type = self.aac.album
id3_tag_type = self.id3.album
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_release_date(self):
vorbis_tag_type = self.vorbis.release_date
aac_tag_type = self.aac.release_date
id3_tag_type = self.id3.release_date
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_title(self):
vorbis_tag_type = self.vorbis.title
aac_tag_type = self.aac.title
id3_tag_type = self.id3.title
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_artist(self):
vorbis_tag_type = self.vorbis.artist
aac_tag_type = self.aac.artist
id3_tag_type = self.id3.artist
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_disc_num(self):
vorbis_tag_type = self.vorbis.disc_num
aac_tag_type = self.aac.disc_num
id3_tag_type = self.id3.disc_num
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_track_num(self):
vorbis_tag_type = self.vorbis.track_num
aac_tag_type = self.aac.track_num
id3_tag_type = self.id3.track_num
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def test_length(self):
vorbis_tag_type = self.vorbis.length
aac_tag_type = self.aac.length
id3_tag_type = self.id3.length
self.check_tag_equality(vorbis_tag_type, aac_tag_type, id3_tag_type)
def check_tag_equality(self, vorbis_tag_type, aac_tag_type, id3_tag_type):
vorbis_tag = vorbis_tag_type(self.vorbis_t1)
aac_tag = aac_tag_type(self.aac_t1)
id3_tag = id3_tag_type(self.id3_t1)
msg = "Testing equality of t1 " + vorbis_tag.name + ": "
self.check_equality(vorbis_tag, aac_tag, id3_tag, msg)
vorbis_tag = vorbis_tag_type(self.vorbis_picard)
aac_tag = aac_tag_type(self.aac_picard)
id3_tag = id3_tag_type(self.id3_picard)
msg = "Testing equality of picard " + vorbis_tag.name + ": "
self.check_equality(vorbis_tag, aac_tag, id3_tag, msg)
vorbis_tag = vorbis_tag_type(self.vorbis_unknown)
aac_tag = aac_tag_type(self.aac_unknown)
id3_tag = id3_tag_type(self.id3_unknown)
msg = "Testing equality of unknown " + vorbis_tag.name + ": "
self.check_equality(vorbis_tag, aac_tag, id3_tag, msg)
###################
# Test writing tags
###################
class TestTagWriteToEmptyFile(TestUtil.TestUtilMixin):
def test_artist_name(self):
self.write_test(self.format.album_artist, "albumartist")
def test_mbid(self):
self.write_test(self.format.mbid, "mbid")
def test_album(self):
self.write_test(self.format.album, "album")
def test_release_date(self):
self.write_test(self.format.release_date, "date")
def test_title(self):
self.write_test(self.format.title, "title")
def test_artist(self):
self.write_test(self.format.artist, "artist")
def test_disc_num(self):
self.write_test(self.format.disc_num, "discnumber")
def test_track_num(self):
self.write_test(self.format.track_num, "tracknumber")
def write_test(self, tag_builder, tag_name):
correct_tag = self.tags.get(tag_name)
tag = tag_builder(self.meta)
self.check_tag(tag_builder, None)
tag.value = correct_tag
tag.save()
self.meta = mutagen.File(self.file_name)
self.check_tag(tag_builder, correct_tag)
#############
# Vorbis format tests
#############
class TestReadGenericTagsVorbis_t1(TestReadGenericTags, unittest.TestCase):
@classmethod
def setUpClass(self):
self.meta = mutagen.File(vorbis_files["t1"])
self.tags = ref.t1_tags
self.format = format.Vorbis.Format
|
class TestReadGenericTag
|
sVorbis_picard(TestReadGenericTags, unittest.TestCase):
@classmethod
def setUpClass(self):
self.meta = mutagen.File(vorbis_files["picard"])
self.tags = ref.picard_tags
self.format = format.Vorbis.Format
class TestReadGenericTagsVorbis_NOMETA(TestReadGenericTags, unittest.TestCase):
@classmethod
def setUpClass(self):
|
davelab6/pyfontaine
|
fontaine/charsets/noto_glyphs/notokufiarabic_regular.py
|
Python
|
gpl-3.0
| 33,355
| 0.022875
|
# -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoKufiArabic-Regular'
native_name = ''
def glyphs(self):
glyphs = []
glyphs.append(0x0261) #uni0759.fina
glyphs.append(0x007F) #uni0625
glyphs.append(0x00D4) #uni0624
glyphs.append(0x0005) #uni0627
glyphs.append(0x00E2) #uni0626
glyphs.append(0x0004) #uni0621
glyphs.append(0x007D) #uni0623
glyphs.append(0x0081) #uni0622
glyphs.append(0x009C) #uni0686.medi
glyphs.append(0x0099) #uni0629
glyphs.append(0x0085) #uni0628
glyphs.append(0x0267) #uni075C.fina
glyphs.append(0x0256) #uni0756.init
glyphs.append(0x007E) #uni0623.fina
glyphs.append(0x0173) #uni069A.init
glyphs.append(0x01A9) #uni06AB.init
glyphs.append(0x02B8) #wavyhamza_above
glyphs.append(0x00C8) #veh.fina
glyphs.append(0x0192) #uni06A3.init
glyphs.append(0x02C3) #uni06C8.fina
glyphs.append(0x01BC) #uni06B1.fina
glyphs.append(0x020E) #uni06AD.finamorocco
glyphs.append(0x0008) #uni062D.init
glyphs.append(0x00DE) #uni06CC.medi
glyphs.append(0x00A1) #uni062E
glyphs.append(0x0007) #uni062D
glyphs.append(0x000B) #uni062F
glyphs.append(0x008D) #uni062A
glyphs.append(0x0129) #uni067B.init
glyphs.append(0x009D) #uni062C
glyphs.append(0x0091) #uni062B
glyphs.append(0x00E8) #uni06440625.isol
glyphs.append(0x0279) #uni0760.medi
glyphs.append(0x02A3) #uni076B.fina
glyphs.append(0x01D0) #uni06B6.fina
glyphs.append(0x01E7) #uni06BF
glyphs.append(0x0072) #uni066E.init
glyphs.append(0x00E5) #uni0626.fina
glyphs.append(0x025C) #uni0758
glyphs.append(0x01E0) #uni06BB.fina
glyphs.append(0x0284) #uni0763.init
glyphs.append(0x01C5) #uni06B3.init
glyphs.append(0x00DB) #uni064A.fina
glyphs.append(0x0033) #uni06440627.fina
glyphs.append(0x0189) #uni06A0.init
glyphs.append(0x017A) #uni069C.fina
glyphs.append(0x0134) #uni067F.fina
glyphs.append(0x0101) #dammatan_01
glyphs.append(0x0216) #uni06B50627.fina
glyphs.append(0x0036) #uni066E.fina
glyphs.append(0x02CC) #uni06CE.init
glyphs.append(0x0075) #beh_dotless_alt.medi
glyphs.append(0x02A0) #uni076A.init
glyphs.append(0x0108) #Ghunna_above
glyphs.append(0x0027) #uni0645.init
glyphs.append(0x0031) #uni0649.fina
glyphs.append(0x02C6) #uni06CA
glyphs.append(0x0073) #uni066E.medi
glyphs.append(0x026D) #uni075D.medi
glyphs.append(0x02E1) #uni060D
glyphs.append(0x01BD) #uni06B1.init
glyphs.append(0x02DD) #uni06DD
glyphs.append(0x0257) #uni0756.medi
glyphs.append(0x0281) #uni0762.medi
glyphs.append(0x017F) #uni069D.init
glyphs.append(0x023E) #uni0750.init
glyphs.append(0x015A) #uni068C.fina
glyphs.append(0x005A) #uni06BA.fina
glyphs.appen
|
d(0x018A) #uni06A0.medi
|
glyphs.append(0x01AC) #uni06AC.fina
glyphs.append(0x018E) #uni06A2.init
glyphs.append(0x0088) #uni0628.fina
glyphs.append(0x00F0) #uni06C2.fina
glyphs.append(0x0196) #uni06A4.medi
glyphs.append(0x0295) #uni0767.medi
glyphs.append(0x0141) #uni0682.init
glyphs.append(0x0062) #uni064B
glyphs.append(0x0265) #uni075B.fina
glyphs.append(0x02E5) #threedots_alt1.below
glyphs.append(0x02CD) #uni06CE.medi
glyphs.append(0x02D5) #uni06D1.fina
glyphs.append(0x01F5) #uni06DB
glyphs.append(0x0138) #uni0680.fina
glyphs.append(0x0277) #uni0760.fina
glyphs.append(0x0133) #uni067F
glyphs.append(0x0260) #uni0759
glyphs.append(0x012F) #uni067D
glyphs.append(0x0089) #uni067E
glyphs.append(0x0127) #uni067B
glyphs.append(0x012B) #uni067C
glyphs.append(0x0123) #uni067A
glyphs.append(0x00EE) #heh_ae.fina
glyphs.append(0x019A) #uni06A5.medi
glyphs.append(0x00D5) #uni0624.fina
glyphs.append(0x02AD) #twodots.vert.below
glyphs.append(0x01D9) #uni06B8.init
glyphs.append(0x02EF) #threedots_alt2.above
glyphs.append(0x008B) #uni067E.init
glyphs.append(0x01FC) #uni06E5
glyphs.append(0x01FD) #uni06E6
glyphs.append(0x00A4) #uni062E.fina
glyphs.append(0x02DF) #uni06E0
glyphs.append(0x01F8) #uni06E1
glyphs.append(0x0098) #uni0679.fina
glyphs.append(0x01FA) #uni06E3
glyphs.append(0x026B) #uni075D.fina
glyphs.append(0x01FF) #uni06E8
glyphs.append(0x02E0) #uni06E9
glyphs.append(0x0202) #uni06ED
glyphs.append(0x022A) #uni06EE
glyphs.append(0x000D) #uni0631.fina
glyphs.append(0x0125) #uni067A.init
glyphs.append(0x0200) #uni06EA
glyphs.append(0x003C) #uni066F.fina
glyphs.append(0x01A6) #uni06AA.medi
glyphs.append(0x0275) #uni075F.medi
glyphs.append(0x000F) #uni0633.init
glyphs.append(0x02F0) #twodots_alt1.above
glyphs.append(0x01C8) #uni06B4.fina
glyphs.append(0x019E) #uni06A6.medi
glyphs.append(0x0121) #uni0678
glyphs.append(0x0095) #uni0679
glyphs.append(0x011D) #uni0676
glyphs.append(0x011F) #uni0677
glyphs.append(0x011B) #uni0675
glyphs.append(0x0117) #uni0672
glyphs.append(0x0119) #uni0673
glyphs.append(0x006D) #uni0670
glyphs.append(0x0083) #uni0671
glyphs.append(0x02A9) #uni076D.medi
glyphs.append(0x01D1) #uni06B6.init
glyphs.append(0x026E) #uni075E
glyphs.append(0x02AE) #twodots.vert.small.above
glyphs.append(0x00B4) #uni0636.init
glyphs.append(0x0268) #uni075C.init
glyphs.append(0x02C5) #uni06C9.fina
glyphs.append(0x00B8) #uni0638.init
glyphs.append(0x0160) #uni068F.fina
glyphs.append(0x0204) #uni06FB.fina
glyphs.append(0x00FE) #uni06F4.urdu
glyphs.append(0x012D) #uni067C.init
glyphs.append(0x025F) #uni0758.medi
glyphs.append(0x0037) #uni066F.init
glyphs.append(0x020F) #uni06440672.isol
glyphs.append(0x01A2) #uni06A8.fina
glyphs.append(0x00B6) #uni0636.fina
glyphs.append(0x00B1) #uni0634.medi
glyphs.append(0x008F) #uni062A.medi
glyphs.append(0x02F3) #uni069F.init
glyphs.append(0x00BE) #uni063A.fina
glyphs.append(0x0241) #uni0751.fina
glyphs.append(0x0213) #uni06440675.isol
glyphs.append(0x0285) #uni0763.medi
glyphs.append(0x00B5) #uni0636.medi
glyphs.append(0x02E4) #threedots.rev_alt1.below
glyphs.append(0x02A7) #uni076D.fina
glyphs.append(0x0176) #uni069B.fina
glyphs.append(0x027F) #uni0762.fina
glyphs.append(0x0148) #uni0684.fina
glyphs.append(0x02EE) #threedots_alt1.above
glyphs.append(0x00EC) #uni06440671.isol
glyphs.append(0x02A8) #uni076D.init
glyphs.append(0x01F2) #uni06D8
glyphs.append(0x004F) #uni06C1.medi
glyphs.append(0x0128) #uni067B.fina
glyphs.append(0x0126) #uni067A.medi
glyphs.append(0x009B) #uni0686.init
glyphs.append(0x012E) #uni067C.medi
glyphs.append(0x02F4) #uni069F.medi
glyphs.append(0x0198) #uni06A5.fina
glyphs.append(0x0263) #uni075A.fina
glyphs.append(0x028B) #uni0765.fina
glyphs.append(0x02B2) #fourdots.above
glyphs.append(0x0249) #uni0753.fina
glyphs.append(0x009F) #uni062C.medi
glyphs.append(0x025A) #uni0757.init
glyphs.append(0x0291) #uni0766.medi
glyphs.append(0x00A3) #uni062E.medi
glyphs.append(0x00C9) #uni0642.init
glyphs.append(0x00BB) #uni063A
glyphs.append(0x0145) #uni0683.init
glyphs.append(0x017E) #uni0
|
GabrielFortin/ansible-module-f5bigip
|
library/f5bigip_ltm_traffic_class.py
|
Python
|
apache-2.0
| 5,265
| 0.002849
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <erjac77@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_ltm_traffic_class
short_description: BIG-IP ltm traffic class module
description:
- Configures a traffic class.
version_added: "2.4"
author:
- "Gabriel Fortin (@GabrielFortin)"
options:
app_service:
description:
- Specifies the application service to which the object belongs.
classification:
description:
- Specifies the actual textual tag to be associated with the flow if the traffic class is matched.
required: true
description:
description:
- Specifies descriptive text that identifies the component.
destination_address:
description:
- Specifies destination IP addresses for the system to use when evaluating traffic flow.
destination_mask:
description:
- Specifies a destination IP address mask for the system to use when evaluating traffic flow.
destination_port:
description:
- Specifies a destination port for the system to use when evaluating traffic flow.
default: 0
name:
description:
- Specifies a unique name for the component.
required: true
partition:
description:
- Specifies the administrative partition in which the component object resides.
default: Common
protocol:
description:
- Specifies a protocol for the system to use when evaluating traffic flow.
default: any
source_address:
description:
- Specifies source IP addresses for the system to use when evaluating traffic flow.
source_mask:
description:
- Specifies a source IP address mask for the system to use when evaluating traffic flow.
source_port:
description:
- Specifies a source port for the system to use when evaluating traffic flow.
default: 0
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create LTM Traffic Class
f5bigip_ltm_traffic_class:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_traffic_class
partition: Common
classification: traffic_class
description: My ltm traffic class
destination_port: 21
protocol: tcp
source_port: 21
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
app_service
|
=dict(type='str'),
classification=dict(type='str'),
description=dict(type='str'),
destination_address=dict(type='str'),
destination_mask=dict(type='str'),
destination_port=dict(type='int'),
file_name=dict(type='str'),
protocol=dict(type='str'),
source_address=dict(type='str'),
source_mask=dict(
|
type='str'),
source_port=dict(type='int')
)
argument_spec.update(F5_PROVIDER_ARGS)
argument_spec.update(F5_NAMED_OBJ_ARGS)
return argument_spec
@property
def supports_check_mode(self):
return True
class F5BigIpLtmTrafficClass(F5BigIpNamedObject):
def _set_crud_methods(self):
self._methods = {
'create': self._api.tm.ltm.traffic_class_s.traffic_class.create,
'read': self._api.tm.ltm.traffic_class_s.traffic_class.load,
'update': self._api.tm.ltm.traffic_class_s.traffic_class.update,
'delete': self._api.tm.ltm.traffic_class_s.traffic_class.delete,
'exists': self._api.tm.ltm.traffic_class_s.traffic_class.exists
}
def main():
params = ModuleParams()
module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode)
try:
obj = F5BigIpLtmTrafficClass(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except Exception as exc:
module.fail_json(msg=str(exc))
if __name__ == '__main__':
main()
|
garrettr/securedrop
|
securedrop/tests/test_db.py
|
Python
|
agpl-3.0
| 2,826
| 0
|
# -*- coding: utf-8 -*-
from flask_testing import TestCase
import mock
import journalist
from utils import db_helper, env
from db import (Journalist, Submission, Reply, get_one_or_else,
LoginThrottledException)
class TestDatabase(TestCase):
def create_app(self):
return journalist.app
def setUp(self):
env.setup()
def tearDown(self):
env.teardown()
@mock.patch('flask.abort')
def test_get_one_or_else_returns_one(self, mock):
new_journo, _ = db_helper.init_journalist()
query = Journalist.query.filter(
Journalist.username == new_journo.username)
with mock.patch('logger') as mock_logger:
selected_journo = get_one_or_else(query, mock_logger, mock)
self.assertEqual(new_journo, selected_journo)
@mock.patch('flask.abort')
def test_get_one_or_else_multiple_results(self, mock):
journo_1, _ = db_helper.init_journalist()
journo_2, _ = db_helper.init_journalist()
with mock.patch('logger') as mock_logger:
get_one_or_else(Journalist.query, mock_logger, mock)
mock_logger.error.assert_called() # Not specifying very long log line
mock.assert_called_with(500)
@mock.patch('flask.abort')
def test_get_one_or_else_no_result_found(self, mock):
query = Journalist.query.filter(Journalist.username == "alice")
with mock.patch('logger') as mock_logger:
get_one_or_else(query, mock_logger, mock)
log_line = ('Found none when one was expected: '
'No row was found for one()')
mock_logger.error.assert_called_with(log_line)
mock.assert_called_with(404)
# Check __repr__ do not throw exceptions
def test_submission_string_representation(self):
source, _ = db_helper.init_source()
db_helper.submit(source, 2)
test_submission = Submission.query.first()
test_submission.__repr__()
def test_reply_string_representation(self):
journalist, _ = db_helper.init_journalist()
source, _ = db_helper.init_source()
db_helper.reply(journalist, source, 2)
test_reply = Reply.query.first()
test_reply.__repr__()
def test_journalist_string_representation(self):
test_journalist, _ = db_helper.init_journalist()
test_journalist.__repr__()
def test_source_string_representation(self):
test_source, _ = db_helper.init_source()
test_source.__repr__()
def test_throttle_login(self):
journalist, _ = db_helper.init_journalist()
for _ in range(Journalist._MAX_LOGIN_ATTEMPTS_PER_PERIOD):
Journalist.throttle_login(journalist)
with self.assertRaises(LoginThrottledException):
|
Journalist.throttle_login(journalist)
|
|
mitsuhiko/fungiform
|
setup.py
|
Python
|
bsd-3-clause
| 1,459
| 0.020562
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Fungiform
~~~~~~~~~
A form handling system that previously was used for Pocoo's Zine
and Plurk's Solace software. Unbundled into a separate library that
is framework independent.
This is still a preview release. Check the source for more information.
:copyright: (c) 2010 by Armin Ronacher, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from s
|
etuptools import setup
except ImportError:
from distutils.core i
|
mport setup
setup(
name = 'Fungiform',
version = '0.2',
url = 'http://github.com/mitsuhiko/fungiform',
license = 'BSD License',
author = 'Armin Ronacher',
author_email = 'armin.ronacher@active-4.com',
description = 'form library',
long_description = __doc__,
keywords = 'form library',
packages = ['fungiform', 'fungiform.tests'],
platforms = 'any',
zip_safe = False,
test_suite = 'fungiform.tests.suite',
include_package_data = True,
classifiers = [
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Development Status :: 4 - Beta'
],
)
|
bernard357/shellbot
|
shellbot/stores/sqlite.py
|
Python
|
apache-2.0
| 5,489
| 0
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import colorlog
import logging
import os
from multiprocessing import Lock, Manager
import sqlite3
from .base import Store
class SqliteStore(Store):
"""
Stores data for one space
This is a basic permanent key-value store.
Example::
store = SqliteStore(db='shellstore.db', id=space.id)
"""
def on_init(self,
prefix='sqlite',
id=None,
db=None,
**kwargs):
"""
Adds processing to initialization
:param prefix: the main keyword for configuration of this space
:type prefix: str
:param id: the unique identifier of the related space (optional)
:type id: str
:param db: name of the file that contains Sqlite data (optional)
:type db: str
Example::
store = SqliteStore(context=context, prefix='sqlite')
Here we create a new store powered by Sqlite, and use
settings under the key ``sqlite`` in the context of
|
this bot.
"""
assert prefix
self.prefix = prefix
self.id = id if id else '*id'
if db:
self.context.set(self.prefix+'.db', db)
def check(self):
"""
Checks configuration
"""
self.context.check(self.prefix+'.db', 'store.db')
def get_db(self):
"""
Gets a handle on the database
"""
db
|
= self.context.get(self.prefix+'.db', 'store.db')
return sqlite3.connect(db)
def bond(self, id=None):
"""
Creates or uses a file to store data
:param id: the unique identifier of the related space
:type id: str
"""
if id:
self.id = id
handle = self.get_db()
try:
handle.execute("CREATE TABLE store \
(id INTEGER PRIMARY KEY, \
context TEXT, \
key TEXT UNIQUE, \
value TEXT)")
except sqlite3.OperationalError as feedback:
logging.debug(feedback)
def _set(self, key, value, handle=None):
"""
Sets a permanent value
:param key: name of the value
:type key: str
:param value: actual value
:type value: any serializable type is accepted
:param handle: an optional instance of a Sqlite database
:type handle: a connection
This functions stores or updates a value in the back-end storage
system.
Example::
store._set('parameter_123', 'George')
"""
handle = handle if handle else self.get_db()
cursor = handle.cursor()
cursor.execute("DELETE FROM store WHERE context=? AND key=?",
(self.id, key))
cursor.execute("INSERT INTO store (context,key,value) VALUES (?,?,?)",
(self.id, key, value))
handle.commit()
cursor.close()
def _get(self, key, handle=None):
"""
Gets a permanent value
:param key: name of the value
:type key: str
:param handle: an optional instance of a Sqlite database
:type handle: a connection
:return: the actual value, or None
Example::
value = store._get('parameter_123')
"""
handle = handle if handle else self.get_db()
cursor = handle.cursor()
cursor.execute("SELECT value FROM store WHERE context=? AND key=?",
(self.id, key))
result = cursor.fetchone()
try:
return result[0]
except TypeError:
return None
def _clear(self, key=None, handle=None):
"""
Forgets a value or all values
:param key: name of the value to forget, or None
:type key: str
:param handle: an optional instance of a Sqlite database
:type handle: a connection
To clear only one value, provide the name of it.
For example::
store._clear('parameter_123')
To clear all values in the store, just call the function
without a value.
For example::
store._clear()
"""
handle = handle if handle else self.get_db()
if not key:
cursor = handle.cursor()
cursor.execute("DELETE FROM store WHERE context=?",
(self.id,))
handle.commit()
cursor.close()
else:
cursor = handle.cursor()
cursor.execute("DELETE FROM store WHERE context=? AND key=?",
(self.id, key))
handle.commit()
cursor.close()
|
xmedius/xmedius-mailrelayserver
|
xmediusmailrelayserver/__init__.py
|
Python
|
mit
| 38
| 0.026316
|
from xmediu
|
smailrela
|
yserver import *
|
singingwolfboy/flask-dance
|
tests/contrib/test_reddit.py
|
Python
|
mit
| 3,475
| 0.001151
|
import pytest
import responses
from urlobject import URLObject
from flask import Flask
from flask_dance.contrib.reddit import make_reddit_blueprint, reddit
from flask_dance.consumer import OAuth2ConsumerBlueprint
from flask_dance.consumer.storage import MemoryStorage
@pytest.fixture
def make_app():
"A callable to create a Flask app with the Reddit provider"
def _make_app(*args, **kwargs):
app = Flask(__name__)
app.secret_key = "whatever"
blueprint = make_reddit_blueprint(*args, **kwargs)
app.register_blueprint(blueprint)
return app
return _make_app
def test_blueprint_factory():
reddit_bp = make_reddit_blueprint(
client_id="foo", client_secret="bar", scope="identity", redirect_to="index"
)
assert isinstance(reddit_bp, OAuth2ConsumerBlueprint)
assert reddit_bp.session.scope == "identity"
assert reddit_bp.session.base_url == "https://oauth.reddit.com/"
assert reddit_bp.session.client_id == "foo"
assert reddit_bp.client_secret == "bar"
assert reddit_bp.authorization_url == "https://www.reddit.com/api/v1/authorize"
assert reddit_bp.token_url == "https://www.reddit.com/api/v1/access_token"
def test_blueprint_factory_with_permanent_token():
reddit_bp = make_reddit_blueprint(
client_id="foo",
client_secret="bar",
scope="identity",
redirect_to="index",
permanent=True,
)
assert isinstance(reddit_bp, OAuth2ConsumerBlueprint)
assert reddit_bp.session.scope == "identity"
assert reddit_bp.session.base_url == "https://oauth.reddit.com/"
assert reddit_bp.session.client_id == "foo"
assert reddit_bp.client_secret == "bar"
assert reddit_bp.authorization_url == "https://www.reddit.com/api/v1/authorize"
assert reddit_bp.token_url == "https://www.reddit.com/api/v1/access_token"
assert reddit_bp.authorization_url_params["duration"] == "permanent"
def test_load_from_config(make_app):
app = make_app()
app.config["REDDIT_OAUTH_CLIENT_ID"] = "foo"
app.config["REDDIT_OAUTH_CLIENT_SECRET"] = "bar"
resp = app.test_client().get("/reddit")
url = resp.headers["Location"]
client_id = URLObject(url).query.dict.get("client_id")
assert client_id == "foo"
@responses.activate
def test_context_local(make_app):
responses.add
|
(responses.GET, "https://google.com")
# set up two apps with two d
|
ifferent set of auth tokens
app1 = make_app(
"foo1",
"bar1",
redirect_to="url1",
storage=MemoryStorage({"access_token": "app1"}),
)
app2 = make_app(
"foo2",
"bar2",
redirect_to="url2",
storage=MemoryStorage({"access_token": "app2"}),
)
# outside of a request context, referencing functions on the `reddit` object
# will raise an exception
with pytest.raises(RuntimeError):
reddit.get("https://google.com")
# inside of a request context, `reddit` should be a proxy to the correct
# blueprint session
with app1.test_request_context("/"):
app1.preprocess_request()
reddit.get("https://google.com")
request = responses.calls[0].request
assert request.headers["Authorization"] == "Bearer app1"
with app2.test_request_context("/"):
app2.preprocess_request()
reddit.get("https://google.com")
request = responses.calls[1].request
assert request.headers["Authorization"] == "Bearer app2"
|
CPedrini/TateTRES
|
erapi.py
|
Python
|
apache-2.0
| 11,009
| 0.004906
|
#-*- encoding: utf-8 -*-
import csv, math, time, re, threading, sys
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
class ErAPI():
# Metodo constructor, seteos basicos necesarios de configuracion, instancia objetos utiles
def __init__(self):
self.data = {}
# Data format: {'XXCiro|BNC': {'id': 123456, 'nick': 'XXCiro', 'level': 49, 'strength': 532.5, 'rank_points': 1233354, 'citizenship': 'Argentina'}}
# Diccionario de puntos/rango
self.rank_required_points = {
"Recruit": 0,
"Private": 15,
"Private*": 45,
"Private**": 80,
"Private***": 120,
"Corporal": 170,
"Corporal*": 250,
"Corporal**": 350,
"Corporal***": 450,
"Sergeant": 600,
"Sergeant*": 800,
"Sergeant**": 1000,
"Sergeant***": 1400,
"Lieutenant": 1850,
"Lieutenant*": 2350,
"Lieutenant**": 3000,
"Lieutenant***": 3750,
"Captain": 5000,
"Captain*": 6500,
"Captain**": 9000,
"Captain***": 12000,
"Major": 15500,
"Major*": 20000,
"Major**": 25000,
"Major***": 31000,
"Commander": 40000,
"Commander*": 5
|
2000,
"Commander**": 67000,
"Commander***": 85000,
"Lt Colonel": 110000,
"Lt Colonel*": 140000,
"Lt Colonel**": 180000,
|
"Lt Colonel***": 225000,
"Colonel": 285000,
"Colonel*": 355000,
"Colonel**": 435000,
"Colonel***": 540000,
"General": 660000,
"General*": 800000,
"General**": 950000,
"General***": 1140000,
"Field Marshal": 1350000,
"Field Marshal*": 1600000,
"Field Marshal**": 1875000,
"Field Marshal***": 2185000,
"Supreme Marshal": 2550000,
"Supreme Marshal*": 3000000,
"Supreme Marshal**": 3500000,
"Supreme Marshal***": 4150000,
"National Force": 4900000,
"National Force*": 5800000,
"National Force**": 7000000,
"National Force***": 9000000,
"World Class Force": 11500000,
"World Class Force*": 14500000,
"World Class Force**": 18000000,
"World Class Force***": 22000000,
"Legendary Force": 26500000,
"Legendary Force*": 31500000,
"Legendary Force**": 37000000,
"Legendary Force***": 42000000,
"God of War": 50000000,
"God of War*": 100000000 ,
"God of War**": 200000000,
"God of War***": 500000000,
"Titan": 1000000000,
"Titan*": 2000000000,
"Titan**": 4000000000,
"Titan***": 10000000000}
# Lista ordenada de rangos segun importancia
self.rank_to_pos = [
"Recruit",
"Private",
"Private*",
"Private**",
"Private***",
"Corporal",
"Corporal*",
"Corporal**",
"Corporal***",
"Sergeant",
"Sergeant*",
"Sergeant**",
"Sergeant***",
"Lieutenant",
"Lieutenant*",
"Lieutenant**",
"Lieutenant***",
"Captain",
"Captain*",
"Captain**",
"Captain***",
"Major",
"Major*",
"Major**",
"Major***",
"Commander",
"Commander*",
"Commander**",
"Commander***",
"Lt Colonel",
"Lt Colonel*",
"Lt Colonel**",
"Lt Colonel***",
"Colonel",
"Colonel*",
"Colonel**",
"Colonel***",
"General",
"General*",
"General**",
"General***",
"Field Marshal",
"Field Marshal*",
"Field Marshal**",
"Field Marshal***",
"Supreme Marshal",
"Supreme Marshal*",
"Supreme Marshal**",
"Supreme Marshal***",
"National Force",
"National Force*",
"National Force**",
"National Force***",
"World Class Force",
"World Class Force*",
"World Class Force**",
"World Class Force***",
"Legendary Force",
"Legendary Force*",
"Legendary Force**",
"Legendary Force***",
"God of War",
"God of War*",
"God of War**",
"God of War***",
"Titan",
"Titan*",
"Titan**",
"Titan***",]
# Bandera de ejecucion, util en caso de que se decida matar de forma manual los threads para actualizar y guardar los datos
self.run = True
# Se paraleliza la carga de datos en un hilo nuevo, el cual es demonio del invocador en caso de "muerte prematura"
th = threading.Thread(target=self.data_loader)
th.daemon = True
th.start()
# Metodo invocador, carga datos y crea threads para guardar y actualizar informacion, solo llamado desde constructor
def data_loader(self):
self.load_data()
self.data_saver_th = threading.Thread(target=self.data_saver)
self.data_saver_th.daemon = True
self.data_saver_th.start()
self.data_updater_th = threading.Thread(target=self.data_updater)
self.data_updater_th.daemon = True
self.data_updater_th.start()
# Metodo para volcar informacion a archivo fisico, solo llamado de metodo data_loader
def data_saver(self):
while self.run:
self.save_data()
time.sleep(60)
# Metodo para actualizar informacion, solo llamado de metodo data_loader
def data_updater(self):
while self.run:
for irc_nick in self.data:
self.update_data(irc_nick)
time.sleep(30)
time.sleep(600)
# ---------------------------------------------------------------------------------- #
# @ PUBLIC METHODS #
# ---------------------------------------------------------------------------------- #
# Metodo para actualizar informacion local del objeto desde archivo
def load_data(self):
try:
f = open('data/er_nick-data.csv', 'rt')
reader = csv.reader(f)
for nick_irc,id,nick_er,level,strength,rank_points,citizenship in reader:
self.data[nick_irc] = {'id': int(id), 'nick': nick_er, 'level': int(level), 'strength': float(strength), 'rank_points': int(rank_points), 'citizenship': citizenship}
f.close()
except:
pass
# Metodo para guardar informacion local del objeto en archivo
def save_data(self):
try:
f = open('data/er_nick-data.csv', 'wt')
writer = csv.writer(f)
for u in self.data:
writer.writerow([u, self.data[u]['id'], self.data[u]['nick'], self.data[u]['level'], self.data[u]['strength'], self.data[u]['rank_points'], self.data[u]['citizenship']])
f.close()
except:
pass
# Metodo scraper para actualizar informacion local del objeto del nick de irc especificado
def update_data(self, irc_nick):
try:
id = self.data[irc_nick]['id']
c = urlopen('http://www.erepublik.com/es/citizen/profile/%d' % id)
page = c.read()
c.close()
self.data[irc_nick]['nick'] = re.search('<meta name="title" content="(.+?) - Ciudadano del Nuevo Mundo" \/>', page.decode('utf-8')).group(1)
self.data[irc_nick]['level'] = int(re.search('<strong class="citizen_level">(.+?)<\/strong>', page.decode('utf-8'), re.DOTALL).group(1))
self.
|
manhhomienbienthuy/scikit-learn
|
examples/linear_model/plot_omp.py
|
Python
|
bsd-3-clause
| 2,054
| 0.000974
|
"""
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(
n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0,
)
(idx,) = w.nonzero()
# distort the clean signal
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx], use_line_collection=True)
# plot the noise-free reconstruction
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coe
|
fs, normalize=False)
omp.fit(X, y)
coef = omp.coef_
(idx_r,) = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.
|
stem(idx_r, coef[idx_r], use_line_collection=True)
# plot the noisy reconstruction
omp.fit(X, y_noisy)
coef = omp.coef_
(idx_r,) = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r], use_line_collection=True)
# plot the noisy reconstruction with number of non-zeros set by CV
omp_cv = OrthogonalMatchingPursuitCV(normalize=False)
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
(idx_r,) = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r], use_line_collection=True)
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle("Sparse signal recovery with Orthogonal Matching Pursuit", fontsize=16)
plt.show()
|
Crowdcomputer/CroCoAPI
|
ui/views.py
|
Python
|
gpl-2.0
| 3,534
| 0.005376
|
import logging
from django.contrib import messages
from django.contrib.auth import authenticate
from django.core.urlresolvers import reverse
from django.http.response import Http404, HttpResponseRedirect
from django.shortcuts import render, redirect, render_to_response
# Create your views here.
from django.template.context import RequestContext
from rest_framework.authtoken.models import Token
from api.models import App
from ui.forms import LoginForm
log = logging.getLogger(__name__)
def login(request):
# if request.user.is_authenticated():
# return redirect('/')
callback = request.GET.get('callback', '')
if not callback.endswith("/"):
callback=callback+"/"
log.debug("callback %s",callback)
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user is not None:
auth_app = user.crowduser.auth_apps.all()
try:
app = App.objects.get(callback=callback)
except Exception:
raise Http404
token = Token.objects.get(user=user)
if app not in auth_app:
log.debug("not in app")
return redirect(reverse(auth)+"?callback="+callback+"&token="+token.key)
else:
log.debug("in app")
# log.debug("Username %s",user.username)
# get the app
# apptoken = request.META.get('HTTP_AUTHORIZATION', b'')
callback = request.GET.get('callback', '')
if type(callback) == type(''):
raise Http404
token = Token.objects.get(user=user)
redirect_to = callback+"?token="+token.key
return HttpResponseRedirect(redirect_to)
else:
messages.info(request,'username and password not valid')
form.helper.form_action = reverse('login') + '?callback=' + callback
render_to_response('ui/login.html', {'form': form}, context_instance=RequestContext(request))
else:
form.helper.form_action = reverse('login') + '?callback=' + callback
render_to_response('ui/login.html', {'form': form}, context_instance=RequestContext(request))
else:
form = LoginForm()
form.helper.form_action = reverse('login') + '?callback=' + callback
# context = {'form': form,'callback':callback}
# context = {}
return render_to_response('ui/login.html', {'form': form}, context_instance=RequestContext(request))
def auth(request):
callback = request.GET.get('callback', '')
token = request.GET.get('token', '')
if not callback.endswith("/"):
callback=callback+"/"
if request.method == 'POST':
token = Token.objects.get(key=token)
app = App.objects.get(callback=callback)
crowduser = token.user.crowduser
crowduser.auth_apps.add(app)
crowduser.save()
redirect_to = callback+"?token="+token.
|
key+"&id="+crowduser.user.pk
return HttpResponseRedirect(redirect_to)
else:
app = App.objects.get(callback=callback)
return render_to_response('ui/app.html', {'app': app,'callback':callback,'token':token}, context_instance=RequestCont
|
ext(request))
|
tsileo/blobstash-python-docstore
|
blobstash/docstore/attachment.py
|
Python
|
mit
| 2,286
| 0.002625
|
"""Attachment utils."""
from pathlib import Path
from uuid import uuid4
from blobstash.docstore.error import DocStoreError
from blobstash.filetree import FileTreeClient
_FILETREE_POINTER_FMT = "@filetree/ref:{}"
_FILETREE_ATTACHMENT_FS_PREFIX = "_filetree:docstore"
class Attachment:
"""An attachment represents a file stored in FileTree and tied to the document via a pointer."""
def __init__(self, pointer, node):
self.pointer = pointer
self.node = node
def __repr__(self):
return "blobstash.docstore.attachment.Attachment(pointer={!r}, node={!r})".format(
self.pointer, self.node
)
def add_attachment(client, path):
"""Creates a new attachment (i.e. upload the file or directory to FileTree), and returns a pointer object."""
p = Path(path)
if p.is_file():
with open(p.absolute(), "rb") as fileobj:
node = FileTreeClient(client=client).fput_node(
p.name, fileobj, content_type=None
)
else:
fs = FileTreeClient(client=client).fs(
uuid4().hex, prefix=_FILETREE_ATTACHMENT_FS_PREFIX
)
fs.upload(path)
node = fs.node()
pointer = _FILETREE_POINTER_FMT.format(node.ref)
return Attachme
|
nt(pointer, node)
def fadd_attachment(client, name, fileobj, content_type=None):
"""Creates a new attachment from the fileobj content with name as filename and returns a pointer object."""
node = FileTreeClient(client=client).fput_node(n
|
ame, fileobj, content_type)
pointer = _FILETREE_POINTER_FMT.format(node.ref)
return Attachment(pointer, node)
def fget_attachment(client, attachment):
"""Returns a fileobj (that needs to be closed) with the content off the attachment."""
node = attachment.node
if node.is_dir():
raise DocStoreError(
"cannot get a fileobj for a directory, please use get_attachment instead"
)
return FileTreeClient(client=client).fget_node(node)
def get_attachment(client, attachment, path):
node = attachment.node
if node.is_file():
FileTreeClient(client=client).get_node(node, path)
return
FileTreeClient(client=client).fs(
ref=node.ref, prefix=_FILETREE_ATTACHMENT_FS_PREFIX
).download(path)
|
demharters/git_scripts
|
calc_movement_HO.py
|
Python
|
apache-2.0
| 795
| 0.021384
|
#! /usr/bin/python
import MDAnalysis
import sys
from pylab import *
my_traj = sys.argv[1]
myTitle = sys.argv[2]
u = MDAnalysis.Universe(my_traj,my_traj)
OH = u.selectAtoms("segid B and resid 18 and name HO")
xArr = []
yArr = []
zArr = []
data = []
for ts in u.trajectory:
xArr.append(OH.coordinates()[0,0])
yArr.append(OH.coordinates()[0,1])
zArr.append(OH.coordinates()[0,2])
def normalise(myArray):
newArray = []
mean = sum(myArray)/float(len(myArray))
for i in range(0,len(myArray)):
newArray.append(myArray[i]-mean)
return newArray
for myArray in (xArr,yArr,zArr):
newArray = normalise(myArray)
dat
|
a.append(newArray)
boxplot(data)
xticks([1,2,3],["x","y","z"])
ylim(-0.4,0.4)
title("%s_HO"%myTitle)
|
savefig("%s_HO.png"%myTitle)
|
Madpilot0/Flask-Simpleauth
|
lib/Users.py
|
Python
|
gpl-3.0
| 1,789
| 0.035774
|
from flask import Flask, render_template, request, jsonify, session, redirect, escape, url_for
import bcrypt
class ServerError(Exception):pass
def loginForm(db, form):
error = None
try:
username = form['username']
cur = db.query("SELECT COUNT(1) FROM users WHERE user = %s", [username])
if not cur.fetchone()[0]:
raise ServerError('Incorrect username / password')
password = form['password']
cur = db.query("SELECT pass FROM users WHERE user = %s;", [username])
for row in cur.fetchall():
if bcrypt.hashpw(password.encode('utf-8'), row[0]) == row[0]:
session['username'] = form['username']
return error
raise ServerError('Incorrect username / password')
except ServerError as e:
error = str(e)
return error
def registerUser(db, form, ROUNDS):
error = None
try:
username = form['username']
password = form['password']
email = form['email']
if not username or not password or not email:
raise ServerError('Fill in all fields')
password = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt(ROUNDS))
|
cur = db.query("SELECT COUNT(*) FROM users WHERE user = %s",[username])
c = cur.fetchone()
if c[0] == 0:
cur = db.query("INSERT INTO users (`user`, `email`, `pass`) VALUES (%s,%s,%s)", [username, email, password])
return None
else:
return "User exists"
except ServerError as e:
error = str(e)
return error
def getUsers(db):
erro
|
r = None
try:
userlist = []
cur = db.query("SELECT user, email FROM users")
for row in cur.fetchall():
userlist.append({'name': row[0], 'email': row[1]})
return userlist
except:
error = "Failed"
return error
def deleteUser(db, user):
error = None
try:
cur = db.query("DELETE FROM users WHERE user = %s",[user])
return None
except:
return "Failed"
|
taedori81/e-commerce-template
|
saleor/product/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 7,464
| 0.005761
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import versatileimagefield.fields
import jsonfield.fields
from decimal import Decimal
import saleor.product.models.fields
import django.core.validators
import django_prices.models
import satchless.item
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='AttributeChoiceValue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('display', models.CharField(max_length=100, verbose_name='display name')),
('color', models.CharField(blank=True, max_length=7, verbose_name='color', validators=[django.core.validators.RegexValidator('^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$')])),
('image', versatileimagefield.fields.VersatileImageField(upload_to='attributes', null=True, verbose_name='image', blank=True)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128, verbose_name='name')),
('slug', models.SlugField(verbose_name='slug')),
('description', models.TextField(verbose_name='description', blank=True)),
('hidden', models.BooleanField(default=False, verbose_name='hidden')),
('lft', models.PositiveIntegerField(editable=False, db_index=True)),
('rght', models.PositiveIntegerField(editable=False, db_index=True)),
('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
('level', models.PositiveIntegerField(editable=False, db_index=True)),
('parent', models.ForeignKey(related_name='children', verbose_name='parent', blank=True, to='product.Category', null=True)),
],
options={
'verbose_name_plural': 'categories',
},
),
migrations.CreateModel(
name='FixedProductDiscount',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('discount', django_prices.models.PriceField(currency=b'USD', verbose_name='discount value', max_digits=12, decimal_places=2)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128, verbose_name='name')),
('description', models.TextField(verbose_name='description')),
('price', django_prices.models.PriceField(currency=b'USD', verbose_name='price', max_digits=12, decimal_places=2)),
('weight', saleor.product.models.fields.WeightField(unit=b'lb', verbose_name='weight', max_digits=6, decimal_places=2)),
('available_on', models.DateField(null=True, verbose_name='available on', blank=True)),
],
bases=(models.Model, satchless.item.ItemRange),
),
migrations.CreateModel(
name='ProductAttribute',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.SlugField(unique=True, verbose_name='internal name')),
('display', models.CharField(max_length=100, verbose_name='display name')),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='ProductImage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('image', versatileimagefield.fields.VersatileImageField(upload_to='products')),
('ppoi', versati
|
leimagefield.fields.
|
PPOIField(default='0.5x0.5', max_length=20, editable=False)),
('alt', models.CharField(max_length=128, verbose_name='short description', blank=True)),
('order', models.PositiveIntegerField(editable=False)),
('product', models.ForeignKey(related_name='images', to='product.Product')),
],
options={
'ordering': ['order'],
},
),
migrations.CreateModel(
name='ProductVariant',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sku', models.CharField(unique=True, max_length=32, verbose_name='SKU')),
('name', models.CharField(max_length=100, verbose_name='variant name', blank=True)),
('price_override', django_prices.models.PriceField(decimal_places=2, currency=b'USD', max_digits=12, blank=True, null=True, verbose_name='price override')),
('weight_override', saleor.product.models.fields.WeightField(decimal_places=2, max_digits=6, blank=True, null=True, verbose_name='weight override', unit=b'lb')),
('attributes', jsonfield.fields.JSONField(default={}, verbose_name='attributes')),
('product', models.ForeignKey(related_name='variants', to='product.Product')),
],
bases=(models.Model, satchless.item.Item),
),
migrations.CreateModel(
name='Stock',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('location', models.CharField(max_length=100, verbose_name='location')),
('quantity', models.IntegerField(default=Decimal('1'), verbose_name='quantity', validators=[django.core.validators.MinValueValidator(0)])),
('cost_price', django_prices.models.PriceField(decimal_places=2, currency=b'USD', max_digits=12, blank=True, null=True, verbose_name='cost price')),
('variant', models.ForeignKey(related_name='stock', verbose_name='variant', to='product.ProductVariant')),
],
),
migrations.AddField(
model_name='product',
name='attributes',
field=models.ManyToManyField(related_name='products', null=True, to='product.ProductAttribute', blank=True),
),
migrations.AddField(
model_name='product',
name='categories',
field=models.ManyToManyField(related_name='products', verbose_name='categories', to='product.Category'),
),
migrations.AddField(
model_name='fixedproductdiscount',
name='products',
field=models.ManyToManyField(to='product.Product', blank=True),
),
migrations.AddField(
model_name='attributechoicevalue',
name='attribute',
field=models.ForeignKey(related_name='values', to='product.ProductAttribute'),
),
migrations.AlterUniqueTogether(
name='stock',
unique_together=set([('variant', 'location')]),
),
]
|
pcu4dros/pandora-core
|
workspace/lib/python3.5/site-packages/psycopg2/errorcodes.py
|
Python
|
mit
| 13,241
| 0.000076
|
"""Error codes for PostgresSQL
This module contains symbolic names for all PostgreSQL error codes.
"""
# psycopg2/errorcodes.py - PostgreSQL error codes
#
# Copyright (C) 2006-2010 Johan Dahlin <jdahlin@async.com.br>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# Based on:
#
# http://www.postgresql.org/docs/current/static/errcodes-appendix.html
#
def lookup(code, _cache={}):
"""Lookup an error code or class code and return its symbolic name.
Raise `KeyError` if the code is not found.
"""
if _cache:
return _cache[code]
# Generate the lookup map at first usage.
tmp = {}
for k, v in globals().items():
if isinstance(v, str) and len(v) in (2, 5):
tmp[v] = k
assert tmp
# Atomic update, to avoid race condition on import (bug #382)
_cache.update(tmp)
return _cache[code]
# autogenerated data: do not edit below this point.
# Error classes
CLASS_SUCCESSFUL_COMPLETION = '00'
CLASS_WARNING = '01'
CLASS_NO_DATA = '02'
CLASS_SQL_STATEMENT_NOT_YET_COMPLETE = '03'
CLASS_CONNECTION_EXCEPTION = '08'
CLASS_TRIGGERED_ACTION_EXCEPTION = '09'
CLASS_FEATURE_NOT_SUPPORTED = '0A'
CLASS_INVALID_TRANSACTION_INITIATION = '0B'
CLASS_LOCATOR_EXCEPTION = '0F'
CLASS_INVALID_GRANTOR = '0L'
CLASS_INVALID_ROLE_SPECIFICATION = '0P'
CLASS_DIAGNOSTICS_EXCEPTION = '0Z'
CLASS_CASE_NOT_FOUND = '20'
CLASS_CARDINALITY_VIOLATION = '21'
CLASS_DATA_EXCEPTION = '22'
CLASS_INTEGRITY_CONSTRAINT_VIOLATION = '23'
CLASS_INVALID_CURSOR_STATE = '24'
CLASS_INVALID_TRANSACTION_STATE = '25'
CLASS_INVALID_SQL_STATEMENT_NAME = '26'
CLASS_TRIGGERED_DATA_CHANGE_VIOLATION = '27'
CLASS_INVALID_AUTHORIZATION_SPECIFICATION = '28'
CLASS_DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST = '2B'
CLASS_INVALID_TRANSACTION_TERMINATION = '2D'
CLASS_SQL_ROUTINE_EXCEPTION = '2F'
CLASS_INVALID_CURSOR_NAME = '34'
CLASS_EXTERNAL_RO
|
UTINE_EXCEPTION = '38'
CLASS_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION = '39'
CLASS_SAVEPOI
|
NT_EXCEPTION = '3B'
CLASS_INVALID_CATALOG_NAME = '3D'
CLASS_INVALID_SCHEMA_NAME = '3F'
CLASS_TRANSACTION_ROLLBACK = '40'
CLASS_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION = '42'
CLASS_WITH_CHECK_OPTION_VIOLATION = '44'
CLASS_INSUFFICIENT_RESOURCES = '53'
CLASS_PROGRAM_LIMIT_EXCEEDED = '54'
CLASS_OBJECT_NOT_IN_PREREQUISITE_STATE = '55'
CLASS_OPERATOR_INTERVENTION = '57'
CLASS_SYSTEM_ERROR = '58'
CLASS_CONFIGURATION_FILE_ERROR = 'F0'
CLASS_FOREIGN_DATA_WRAPPER_ERROR = 'HV'
CLASS_PL_PGSQL_ERROR = 'P0'
CLASS_INTERNAL_ERROR = 'XX'
# Class 00 - Successful Completion
SUCCESSFUL_COMPLETION = '00000'
# Class 01 - Warning
WARNING = '01000'
NULL_VALUE_ELIMINATED_IN_SET_FUNCTION = '01003'
STRING_DATA_RIGHT_TRUNCATION = '01004'
PRIVILEGE_NOT_REVOKED = '01006'
PRIVILEGE_NOT_GRANTED = '01007'
IMPLICIT_ZERO_BIT_PADDING = '01008'
DYNAMIC_RESULT_SETS_RETURNED = '0100C'
DEPRECATED_FEATURE = '01P01'
# Class 02 - No Data (this is also a warning class per the SQL standard)
NO_DATA = '02000'
NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED = '02001'
# Class 03 - SQL Statement Not Yet Complete
SQL_STATEMENT_NOT_YET_COMPLETE = '03000'
# Class 08 - Connection Exception
CONNECTION_EXCEPTION = '08000'
SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION = '08001'
CONNECTION_DOES_NOT_EXIST = '08003'
SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION = '08004'
CONNECTION_FAILURE = '08006'
TRANSACTION_RESOLUTION_UNKNOWN = '08007'
PROTOCOL_VIOLATION = '08P01'
# Class 09 - Triggered Action Exception
TRIGGERED_ACTION_EXCEPTION = '09000'
# Class 0A - Feature Not Supported
FEATURE_NOT_SUPPORTED = '0A000'
# Class 0B - Invalid Transaction Initiation
INVALID_TRANSACTION_INITIATION = '0B000'
# Class 0F - Locator Exception
LOCATOR_EXCEPTION = '0F000'
INVALID_LOCATOR_SPECIFICATION = '0F001'
# Class 0L - Invalid Grantor
INVALID_GRANTOR = '0L000'
INVALID_GRANT_OPERATION = '0LP01'
# Class 0P - Invalid Role Specification
INVALID_ROLE_SPECIFICATION = '0P000'
# Class 0Z - Diagnostics Exception
DIAGNOSTICS_EXCEPTION = '0Z000'
STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER = '0Z002'
# Class 20 - Case Not Found
CASE_NOT_FOUND = '20000'
# Class 21 - Cardinality Violation
CARDINALITY_VIOLATION = '21000'
# Class 22 - Data Exception
DATA_EXCEPTION = '22000'
STRING_DATA_RIGHT_TRUNCATION = '22001'
NULL_VALUE_NO_INDICATOR_PARAMETER = '22002'
NUMERIC_VALUE_OUT_OF_RANGE = '22003'
NULL_VALUE_NOT_ALLOWED = '22004'
ERROR_IN_ASSIGNMENT = '22005'
INVALID_DATETIME_FORMAT = '22007'
DATETIME_FIELD_OVERFLOW = '22008'
INVALID_TIME_ZONE_DISPLACEMENT_VALUE = '22009'
ESCAPE_CHARACTER_CONFLICT = '2200B'
INVALID_USE_OF_ESCAPE_CHARACTER = '2200C'
INVALID_ESCAPE_OCTET = '2200D'
ZERO_LENGTH_CHARACTER_STRING = '2200F'
MOST_SPECIFIC_TYPE_MISMATCH = '2200G'
NOT_AN_XML_DOCUMENT = '2200L'
INVALID_XML_DOCUMENT = '2200M'
INVALID_XML_CONTENT = '2200N'
INVALID_XML_COMMENT = '2200S'
INVALID_XML_PROCESSING_INSTRUCTION = '2200T'
INVALID_INDICATOR_PARAMETER_VALUE = '22010'
SUBSTRING_ERROR = '22011'
DIVISION_BY_ZERO = '22012'
INVALID_ARGUMENT_FOR_NTILE_FUNCTION = '22014'
INTERVAL_FIELD_OVERFLOW = '22015'
INVALID_ARGUMENT_FOR_NTH_VALUE_FUNCTION = '22016'
INVALID_CHARACTER_VALUE_FOR_CAST = '22018'
INVALID_ESCAPE_CHARACTER = '22019'
INVALID_REGULAR_EXPRESSION = '2201B'
INVALID_ARGUMENT_FOR_LOGARITHM = '2201E'
INVALID_ARGUMENT_FOR_POWER_FUNCTION = '2201F'
INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION = '2201G'
INVALID_ROW_COUNT_IN_LIMIT_CLAUSE = '2201W'
INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE = '2201X'
INVALID_LIMIT_VALUE = '22020'
CHARACTER_NOT_IN_REPERTOIRE = '22021'
INDICATOR_OVERFLOW = '22022'
INVALID_PARAMETER_VALUE = '22023'
UNTERMINATED_C_STRING = '22024'
INVALID_ESCAPE_SEQUENCE = '22025'
STRING_DATA_LENGTH_MISMATCH = '22026'
TRIM_ERROR = '22027'
ARRAY_SUBSCRIPT_ERROR = '2202E'
INVALID_TABLESAMPLE_REPEAT = '2202G'
INVALID_TABLESAMPLE_ARGUMENT = '2202H'
FLOATING_POINT_EXCEPTION = '22P01'
INVALID_TEXT_REPRESENTATION = '22P02'
INVALID_BINARY_REPRESENTATION = '22P03'
BAD_COPY_FILE_FORMAT = '22P04'
UNTRANSLATABLE_CHARACTER = '22P05'
NONSTANDARD_USE_OF_ESCAPE_CHARACTER = '22P06'
# Class 23 - Integrity Constraint Violation
INTEGRITY_CONSTRAINT_VIOLATION = '23000'
RESTRICT_VIOLATION = '23001'
NOT_NULL_VIOLATION = '23502'
FOREIGN_KEY_VIOLATION = '23503'
UNIQUE_VIOLATION = '23505'
CHECK_VIOLATION = '23514'
EXCLUSION_VIOLATION = '23P01'
# Class 24 - Invalid Cursor State
INVALID_CURSOR_STATE = '24000'
# Class 25 - Invalid Transaction State
INVALID_TRANSACTION_STATE = '25000'
ACTIVE_SQL_TRANSACTION = '25001'
BRANCH_TRANSACTION_ALREADY_ACTIVE = '25002'
INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION = '25003'
INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION = '25004'
NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION = '25005'
READ_ONLY_SQL_TRANSACTION = '25006'
SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED = '25007'
HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL = '25008'
NO_ACTIVE_SQL_TRANSACTION = '25P01'
IN_FAILED_SQL_TRANSACTION = '25P02'
# Class 26 - Invalid SQL Statement Name
INVALID_SQL_STATEMENT_NAME = '26000'
# Class 27 - Triggered Data Change Violation
TRIGGERED_DATA_CHANGE_VIOLATION = '27000'
# Class 28 - Invalid Authorization Specification
INVALID_AUTHORIZATION_SPECIFICATION = '28000'
INVALID_PASSWORD = '28P01'
# Class 2B - Dependent Privilege Descriptors Still Exist
DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST = '2B000'
DEPENDENT_OBJECTS_STILL_EXIST = '2BP01'
# Class 2D - In
|
Eraldo/django-autocomplete-light
|
src/dal_queryset_sequence/fields.py
|
Python
|
mit
| 4,390
| 0
|
"""Autocomplete fields for QuerySetSequence choices."""
from dal_contenttypes.fields import (
ContentTypeModelMultipleFieldMixin,
GenericModelMixin,
)
from django import forms
from django.contrib.contenttypes.models import ContentType
from queryset_sequence import QuerySetSequence
class QuerySetSequenceFieldMixin(object):
"""Base methods for QuerySetSequence fields."""
def get_queryset_for_content_type(self, content_type_id):
"""Return the QuerySet from the QuerySetSequence for a ctype."""
content_type = ContentType.objects.get_for_id(content_type_id)
for queryset in self.queryset.query._querysets:
if queryset.model == content_type.model_class():
return queryset
def raise_invalid_choice(self, params=None):
"""
Raise a ValidationError for invalid_choice.
The validation error left unprecise about the exact error for security
reasons, to prevent an attacker doing information gathering to reverse
valid content type and object ids.
"""
raise forms.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params=params,
)
def get_content_type_id_object_id(self, value):
"""Return a tuple of ctype id, object id for value."""
return value.split('-', 1)
class QuerySetSequenceModelField(GenericModelMixin,
QuerySetSequenceFieldMixin,
forms.ModelChoiceField):
"""Replacement for ModelChoiceField supporting QuerySetSequence choices."""
def to_python(self, value):
"""
Given a string like '3-5', return the model of ctype #3 and pk 5.
Note that in the case of ModelChoiceField, to_python is also in charge
of security, it's important to get the results from self.queryset.
"""
if not value:
return value
content_type_id, object_id = self.get_content_type_id_object_id(value)
queryset = self.get_queryset_for_content_type(content_type
|
_id)
if queryset is None:
self.raise_invalid_choice()
try:
return queryset.get(pk=object_id)
except queryset.model.DoesNotExist:
self.raise_invalid_choice()
class QuerySetSequenceModelMultipleField(ContentTypeModelMultipleFieldMixin,
QuerySetSequenceFieldMixin,
forms.ModelMultipleChoiceField):
"""ModelMultipleChoice
|
Field with support for QuerySetSequence choices."""
def _deduplicate_values(self, value):
# deduplicate given values to avoid creating many querysets or
# requiring the database backend deduplicate efficiently.
try:
return frozenset(value)
except TypeError:
# list of lists isn't hashable, for example
raise forms.ValidationError(
self.error_messages['list'],
code='list',
)
def _get_ctype_objects(self, values):
pks = {}
for val in values:
content_type_id, object_id = self.get_content_type_id_object_id(
val)
pks.setdefault(content_type_id, [])
pks[content_type_id].append(object_id)
return pks
def _get_queryset_for_pks(self, pks):
querysets = []
for content_type_id, object_ids in pks.items():
queryset = self.get_queryset_for_content_type(content_type_id)
if queryset is None:
self.raise_invalid_choice(
params=dict(
value='%s-%s' % (content_type_id, object_ids[0])
)
)
querysets.append(queryset.filter(pk__in=object_ids))
return QuerySetSequence(*querysets)
def _check_values(self, value):
values = self._deduplicate_values(value)
pks = self._get_ctype_objects(values)
queryset = self._get_queryset_for_pks(pks)
fetched_values = [
'%s-%s' % (ContentType.objects.get_for_model(o).pk, o.pk)
for o in queryset
]
for val in value:
if val not in fetched_values:
self.raise_invalid_choice(params={'value': val})
return queryset
|
atmtools/typhon
|
typhon/utils/sphinxext.py
|
Python
|
mit
| 1,221
| 0
|
# -*- coding: utf-8 -*-
"""This module contains custom roles to use in Sphinx.
"""
from docutils import nodes
def setup(app):
"""Install the extension.
Parameters:
app: Sphinx application context.
"""
app.add_role('arts', arts_docserver_role)
def arts_docserver_role(name, rawtext, text, lineno, inliner, options=None,
content=None):
"""Create a link to ARTS docserver.
Parameters:
name (str): The role name used in the document.
rawtext (str): The entire markup snippet, with role.
text (str): The text marked with the role.
lineno (str): The line number where rawtext appears in the input.
inliner (str): The inliner
|
instance that called us.
optio
|
ns (dict): Directive options for customization.
content (list): The directive content for customization.
Returns:
list, list: Nodes to insert into the document, System messages.
"""
if content is None:
content = []
if options is None:
options = {}
url = 'http://radiativetransfer.org/docserver-trunk/all/{}'.format(text)
node = nodes.reference(rawtext, text, refuri=url, **options)
return [node], []
|
dana-i2cat/felix
|
expedient/src/python/expedient/common/federation/geni/ch.py
|
Python
|
apache-2.0
| 15,839
| 0.00423
|
#----------------------------------------------------------------------
# Copyright (c) 2010 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
Reference GENI GCF Clearinghouse. Uses SFA Certificate and credential objects.
Run from gcf-ch.py
Will produce signed user credentials from a GID, return a
list of aggregates read from a config file, and create a new Slice Credential.
"""
import datetime
import traceback
import uuid
import os
import dateutil.parser
from SecureXMLRPCServer import SecureXMLRPCServer
from expedient.common.federation.geni.util import cred_util
from expedient.common.federation.geni.util import cert_util
from expedient.common.federation.geni.util import urn_util
from expedient.common.federation.sfa.trust import gid
# Substitute eg "openflow//stanford"
# Be sure this matches init-ca.py:CERT_AUTHORITY
# This is in publicid format
SLICE_AUTHORITY = "geni//gpo//gcf"
# Credential lifetimes in seconds
# Extend slice lifetimes to actually use the resources
USER_CRED_LIFE = 86400
SLICE_CRED_LIFE = 3600
# Make the max life of a slice 30 days (an arbitrary length).
SLICE_MAX_LIFE_SECS = 30 * 24 * 60 * 60
# The list of Aggregates that this Clearinghouse knows about
# should be defined in the gcf_config file in the am_* properties.
# ListResources will refer the client to these aggregates
# Clearinghouse.runserver currently does the register_aggregate_pair
# calls for each row in that file
# but this should be doable dynamically
# Some sample pairs:
# GPOMYPLC = ('urn:publicid:IDN+plc:gpo1+authority+sa',
# 'http://myplc1.gpolab.bbn.com:12348')
# TESTGCFAM = ('urn:publicid:IDN+geni.net:gpo+authority+gcf',
# 'https://127.0.0.1:8001')
# OTHERGPOMYPLC = ('urn:publicid:IDN+plc:gpo+authority+site2',
# 'http://128.89.81.74:12348')
# ELABINELABAM = ('urn:publicid:IDN+elabinelab.geni.emulab.net',
# 'https://myboss.elabinelab.geni.emulab.net:443/protogeni/xmlrpc/am')
class SampleClearinghouseServer(object):
"""A sample clearinghouse with barebones functionality."""
def __init__(self, delegate):
self._delegate = delegate
def GetVersion(self):
return self._delegate.GetVersion()
def CreateSlice(self, urn=None):
return self._delegate.CreateSlice(urn_req=urn)
def RenewSlice(self, urn, expire_str):
try:
return self._delegate.RenewSlice(urn, expire_str)
except:
self._delegate.logger.error(traceback.format_exc())
raise
def DeleteSlice(self, urn):
return self._delegate.DeleteSlice(urn)
def ListAggregates(self):
return self._delegate.ListAggregates()
def CreateUserCredential(self, cert):
return self._delegate.CreateUserCredential(cert)
class Clearinghouse(object):
def __init__(self):
self.logger = cred_util.logging.getLogger('gcf-ch')
self.slices = {}
self.aggs = []
def load_aggregates(self):
"""Loads aggregates from the clearinghouse section of the config file.
In the config section there are keys for each am, am_1, am_2, ..., am_n
The value for each key is the urn and url of the aggregate separated by a comma
Returns True if aggregates were loaded, False otherwise.
"""
for (key, val) in self.config['clearinghouse'].items():
if not key.startswith('am_'):
continue
(urn,url) = val.split(',')
urn = urn.strip()
url = url.strip()
if not urn:
self.logger.warn('Empty URN for aggregate %s in gcf_config' % key)
continue
if not url:
self.logger.warn('Empty URL for aggregate %s in gcf_config' % key)
continue
if urn in [x for (x, _) in self.aggs]:
self.logger.warn('Duplicate URN %s in gcf_config' % key)
continue
self.logger.info("Registering AM %s at %s", urn, url)
self.aggs.append((urn, url))
def runserver(self, addr, keyfile=None, certfile=None,
ca_certs=None, authority=None,
user_len=None, slice_len=None, config=None):
"""Run the clearinghouse server."""
# ca_certs is a dir of several certificates for peering
# If not supplied just use the certfile as the only trusted root
self.keyfile = keyfile
self.certfile = certfile
self.config = config
# Error check the keyfile, certfile all exist
if keyfile is None or not os.path.isfile(os.path.expanduser(keyfile)):
raise Exception("Missing CH key file %s" % keyfile)
if certfile is None or not os.path.isfile(os.path.expanduser(certfile)):
raise Exception("Missing CH cert file %s" % certfile)
if ca_certs is None:
ca_certs = certfile
self.logger.info("Using only my CH cert as a trusted root cert")
self.trusted_root_files = cred_util.CredentialVerifier(ca_certs).root_cert_files
if not os.path.exists(os.path.expanduser(ca_certs)):
raise Exception("Missing CA cert(s): %s" % ca_certs)
global SLICE_AUTHORITY, USER_CRED_LIFE, SLICE_CRED_LIFE
SLICE_AUTHORITY = authority
USER_CRED_LIFE = int(user_len)
SLICE_CRED_LIFE = int(slice_len)
# Load up the aggregates
self.load_aggregates()
# This is the arg to _make_server
ca_certs_onefname = cred_util.CredentialVerifier.getCAsFileFromDir(ca_certs)
# This is used below by CreateSlice
self.ca_cert_fnames = []
if os.path.isfile(os.path.expanduser(ca_certs)):
self.ca_cert_fnames = [os.path.expanduser(ca_certs)]
elif os.path.isdir(os.path.expanduser(ca_certs)):
self.ca_cert_fnames = [os.path.join(os.path.expanduser(ca_certs), name) for name in os.listdir(os.path.expanduser(ca_certs)) if name != cred_util.CredentialVerifier.CATEDCERTSFNAME]
# Create the xmlrpc server, load the rootkeys and do the ssl thing.
self._server = self._make_server(add
|
r, keyfile, certfile,
|
ca_certs_onefname)
self._server.register_instance(SampleClearinghouseServer(self))
self.logger.info('GENI CH Listening on port %d...' % (addr[1]))
self._server.serve_forever()
def _make_server(self, addr, keyfile=None, certfile=None,
ca_certs=None):
"""Creates the XML RPC server."""
# ca_certs is a file of concatenated certs
return SecureXMLRPCServer(addr, keyfile=keyfile, certfile=certfile,
ca_certs=ca_certs)
def GetVersion(self):
self.logger.info("Called GetVersion")
version = dict()
version['gcf-ch_api'] = 1
return version
# FIXME: Change that URN to be a name and non-o
|
afronski/playground-other
|
python/books/learn-python-the-hard-way/projects/ex48/ex48/parser.py
|
Python
|
mit
| 1,663
| 0.004811
|
class ParserError(Exception):
pass
class Sentence(object):
def __init__(self, subject, verb, obj):
# We take ('noun','thing') tuples and convert them.
self.subject = subject[1]
self.verb = verb[1]
self.object = obj[1]
def peek(word_list):
if word_list:
word = word_list[0]
return word[0]
else:
return None
def match(word_list, expecting):
if word_list:
word = word_list.pop(0)
if word[0] == expecting:
return word
else:
return None
else:
return None
def skip(word_list, word_type):
while peek(word_list) == word_type:
match(word_list, word_type)
def parse_verb(word_list):
skip(word_list, 'stop')
if peek(word_list) == 'verb':
return match(word_list, 'verb')
else:
raise ParserError("Expected a verb next.")
def parse_object(word_list):
skip(word_list, 'stop')
next_word = peek(word_list)
if next_word == 'noun':
return match(word_list, 'noun')
elif next_word == 'direction':
return match(word_list, 'direction')
else:
raise ParserError("Expected a noun or direction next.")
def parse_subject(word_list):
skip(word_list, 'stop')
next_word = peek(word_list)
if next_word == 'no
|
un':
return match(word_list, 'noun')
elif next_word == 'verb':
return ('noun
|
', 'player')
else:
raise ParserError("Expected a verb next.")
def parse_sentence(word_list):
subj = parse_subject(word_list)
verb = parse_verb(word_list)
obj = parse_object(word_list)
return Sentence(subj, verb, obj)
|
edx-solutions/edx-platform
|
lms/djangoapps/instructor/tests/views/test_instructor_dashboard.py
|
Python
|
agpl-3.0
| 27,999
| 0.002357
|
"""
Unit tests for instructor_dashboard.py.
"""
import datetime
import re
import ddt
import six
from django.conf import settings
from django.contrib.sites.models import Site
from django.test.utils import override_settings
from django.urls import reverse
from mock import patch
from pyquery import PyQuery as pq
from pytz import UTC
from six import text_type
from six.moves import range
from common.test.utils import XssTestMixin
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_response
from lms.djangoapps.courseware.tabs import get_course_tab_list
from lms.djangoapps.courseware.tests.factories import StaffFactory, StudentModuleFactory, UserFactory
from lms.djangoapps.courseware.tests.helpers import LoginEnrollmentTestCase
from lms.djangoapps.grades.config.waffle import WRITABLE_GRADEBOOK, waffle_flags
from lms.djangoapps.instructor.views.gradebook_api import calculate_page_info
from openedx.core.djangoapps.site_configuration.models import SiteConfiguration
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from shoppingcart.models import CourseRegCodeItem, Order, PaidCourseRegistration
from student.models import CourseEnrollment
from student.roles import CourseFinanceAdminRole
from student.tests.factories import AdminFactory, CourseAccessRoleFactory, CourseEnrollmentFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
def intercept_renderer(path, context):
"""
Intercept calls to `render_to_response` and attach the context dict to the
response for examination in unit tests.
"""
# I think Django already does this for you in their TestClient, except
# we're bypassing that by using edxmako. Probably edxmako should be
# integrated better with Django's rendering and event system.
response = render_to_response(path, context)
response.mako_context = context
response.mako_template = path
return response
@ddt.ddt
class TestInstructorDashboard(ModuleStoreTestCase, LoginEnrollmentTestCase, XssTestMixin):
"""
Tests for the instructor dashboard (not legacy).
"""
def setUp(self):
"""
Set up tests
"""
super(TestInstructorDashboard, self).setUp()
self.course = CourseFactory.create(
grading_policy={"GRADE_CUTOFFS": {"A": 0.75, "B": 0.63, "C": 0.57, "D": 0.5}},
display_name='<script>alert("XSS")</script>'
)
self.course_mode = CourseMode(
course_id=self.course.id,
mode_slug=CourseMode.DEFAULT_MODE_SLUG,
mode_display_name=CourseMode.DEFAULT_MODE.name,
min_price=40
)
self.course_info = CourseFactory.create(
org="ACME",
number="001",
run="2017",
name="How to defeat the Road Runner"
)
self.course_mode.save()
# Create instructor account
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password="test")
# URL for instructor dash
self.url = reverse('instructor_dashboard', kwargs={'course_id': text_type(self.course.id)})
def get_dashboard_enrollment_message(self):
"""
Returns expected dashboard enrollment message with link to Insights.
"""
return u'Enrollment data is now available in <a href="http://example.com/courses/{}" ' \
'rel="noopener" target="_blank">Example</a>.'.format(text_type(self.course.id))
def get_dashboard_analytics_message(self):
"""
Returns expected dashboard demographic message with link to Insights.
"""
return u'For analytics about your course, go to <a href="http://example.com/courses/{}" ' \
'rel="noopener" target="_blank">Example</a>.'.format(text_type(self.course.id))
def test_instructor_tab(self):
"""
Verify that the instructor tab appears for staff only.
"""
def has_instructor_tab(user, course):
"""Returns true if the "Instructor" tab is shown."""
tabs = get_course_tab_list(user, course)
return len([tab for tab in tabs if tab.name == 'Instructor']) == 1
self.assertTrue(has_instructor_tab(self.instructor, self.course))
staff = StaffFactory(course_key=self.course.id)
self.assertTrue(has_instructor_tab(staff, self.course))
student = UserFactory.create()
self.assertFalse(has_instructor_tab(student, self.course))
researcher = UserFactory.create()
CourseAccessRoleFactory(
course_id=self.course.id,
user=researcher,
role='data_researcher',
org=self.course.id.org
)
self.assertTrue(has_instructor_tab(researcher, self.course))
org_researcher = UserFactory.create()
CourseAccessRoleFactory(
course_id=None,
user=org_researcher,
role='data_researcher',
org=self.course.id.org
)
self.assertTrue(has_instructor_tab(org_researcher, self.course))
@ddt.data(
('staff', False),
('instructor', False),
('data_researcher', True),
('global_staff', True),
)
@ddt.unpack
def test_data_download(self, access_role, can_access):
"""
Verify that the Data Download tab only s
|
hows up for certain roles
"""
download_section = '<li class="nav-item"><button type="button" class="btn-link data_download" '\
'data-section="data_download">Data Download</button></li>'
user = UserFactory.create(is_st
|
aff=access_role == 'global_staff')
CourseAccessRoleFactory(
course_id=self.course.id,
user=user,
role=access_role,
org=self.course.id.org
)
self.client.login(username=user.username, password="test")
response = self.client.get(self.url)
if can_access:
self.assertContains(response, download_section)
else:
self.assertNotContains(response, download_section)
@override_settings(ANALYTICS_DASHBOARD_URL='http://example.com')
@override_settings(ANALYTICS_DASHBOARD_NAME='Example')
def test_data_download_only(self):
"""
Verify that only the data download tab is visible for data researchers.
"""
user = UserFactory.create()
CourseAccessRoleFactory(
course_id=self.course.id,
user=user,
role='data_researcher',
org=self.course.id.org
)
self.client.login(username=user.username, password="test")
response = self.client.get(self.url)
matches = re.findall(
rb'<li class="nav-item"><button type="button" class="btn-link .*" data-section=".*">.*',
response.content
)
assert len(matches) == 1
@ddt.data(
("How to defeat the Road Runner", "2017", "001", "ACME"),
)
@ddt.unpack
def test_instructor_course_info(self, display_name, run, number, org):
"""
Verify that it shows the correct course information
"""
url = reverse(
'instructor_dashboard',
kwargs={
'course_id': six.text_type(self.course_info.id)
}
)
response = self.client.get(url)
content = pq(response.content)
self.assertEqual(
display_name,
content('#field-course-display-name b').contents()[0].strip()
)
self.assertEqual(
run,
content('#field-course-name b').contents()[0].strip()
)
self.assertEqual(
number,
content('#field-course-number b').contents()[0].strip()
)
self.assertEqual(
org,
content('#field-course-organization b').contents()[0].strip()
)
@ddt.data(True, False)
|
gtest-org/test10
|
jenkins_jobs/modules/zuul.py
|
Python
|
apache-2.0
| 6,120
| 0.00049
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Zuul module adds triggers that configure jobs for use with Zuul_.
To change the Zuul notification URL, set a global default::
- defaults:
name: global
zuul-url: http://127.0.0.1:8001/jenkins_endpoint
The above URL is the default.
.. _Zuul: http://ci.openstack.org/zuul/
"""
def zuul():
"""yaml: zuul
Configure this job to be triggered by Zuul.
Example::
triggers:
- zuul
"""
def zuul_post():
"""yaml: zuul-post
Configure this post-merge job to be triggered by Zuul.
Example::
triggers:
- zuul-post
"""
import jenkins_jobs.modules.base
ZUUL_PARAMETERS = [
{'string':
{'description': 'Zuul provided key to link builds with Gerrit events',
'name': 'ZUUL_UUID'}},
{'string':
{'description': 'Zuul provided key to link builds with Gerrit'
' events (deprecated use ZUUL_UUID instead)',
'name': 'UUID'}},
{'string':
{'description': 'Zuul pipeline triggering this job',
'name': 'ZUUL_PIPELINE'}},
{'string':
{'description': 'Zuul provided project name',
'name': 'GERRIT_PROJECT'}},
{'string':
{'description': 'Branch name of triggering project',
'name': 'ZUUL_PROJECT'}},
{'string':
{'description': 'Zuul provided branch name',
'name': 'GERRIT_BRANCH'}},
{'string':
{'description': 'Branch name of triggering change',
'name': 'ZUUL_BRANCH'}},
{'string':
{'description': 'Zuul provided list of dependent changes to merge',
'name': 'GERRIT_CHANGES'}},
{'string':
{'description': 'List of dependent changes to merge',
'name': 'ZUUL_CHANGES'}},
{'string':
{'description': 'Reference for the merged commit(s) to use',
'name': 'ZUUL_REF'}},
{'string':
{'description': 'The commit SHA1 at the head of ZUUL_REF',
'name': 'ZUUL_COMMIT'}},
{'string':
{'description': 'List of included changes',
'name': 'ZUUL_CHANGE_IDS'}},
{'string':
{'description': 'ID of triggering change',
'name': 'ZUUL_CHANGE'}},
{'string':
{'description': 'Patchset of triggering change',
'name': 'ZUUL_PATCHSET'}},
]
ZUUL_POST_PARAMETERS = [
{'string':
{'description': 'Zuul provided key to link builds with Gerrit events',
'name': 'ZUUL_UUID'}},
{'string':
{'description': 'Zuul provided key to link builds with Gerrit'
' events (deprecated use ZUUL_UUID instead)',
'name': 'UUID'}},
{'string':
{'description': 'Zuul pipeline triggering this job',
'name': 'ZUUL_PIPELINE'}},
{'string':
{'description': 'Zuul provided project name',
'name': 'GERRIT_PROJECT'}},
{'string':
{'description': 'Branch name of triggering project',
'name': 'ZUUL_PROJECT'}},
{'string':
{'description': 'Zuul provided ref name',
'name': 'GERRIT_REFNAME'}},
{'string':
{'description': 'Name of updated reference triggering this job',
'name': 'ZUUL_REF'}},
{'string':
{'description': 'Name of updated reference triggering this job',
'name': 'ZUUL_REFNAME'}},
{'string':
{'description': 'Zuul provided old reference for ref-updated',
'name': 'GERRIT_OLDREV'}},
{'string':
{'description': 'Old SHA at this reference',
'name': 'ZUUL_
|
OLDREV'}},
{'string':
{'description': 'Zuul provided new reference for
|
ref-updated',
'name': 'GERRIT_NEWREV'}},
{'string':
{'description': 'New SHA at this reference',
'name': 'ZUUL_NEWREV'}},
{'string':
{'description': 'Shortened new SHA at this reference',
'name': 'ZUUL_SHORT_NEWREV'}},
]
DEFAULT_URL = 'http://127.0.0.1:8001/jenkins_endpoint'
class Zuul(jenkins_jobs.modules.base.Base):
sequence = 0
def handle_data(self, parser):
changed = False
jobs = (parser.data.get('job', {}).values() +
parser.data.get('job-template', {}).values())
for job in jobs:
triggers = job.get('triggers')
if not triggers:
continue
if ('zuul' not in job.get('triggers', []) and
'zuul-post' not in job.get('triggers', [])):
continue
if 'parameters' not in job:
job['parameters'] = []
if 'notifications' not in job:
job['notifications'] = []
# This isn't a good pattern, and somewhat violates the
# spirit of the global defaults, but Zuul is working on
# a better design that should obviate the need for most
# of this module, so this gets it doen with minimal
# intrusion to the rest of JJB.
if parser.data.get('defaults', {}).get('global'):
url = parser.data['defaults']['global'].get(
'zuul-url', DEFAULT_URL)
notifications = [{'http': {'url': url}}]
job['notifications'].extend(notifications)
if 'zuul' in job.get('triggers', []):
job['parameters'].extend(ZUUL_PARAMETERS)
job['triggers'].remove('zuul')
if 'zuul-post' in job.get('triggers', []):
job['parameters'].extend(ZUUL_POST_PARAMETERS)
job['triggers'].remove('zuul-post')
changed = True
return changed
|
lidel/mmda
|
tags/urls.py
|
Python
|
cc0-1.0
| 145
| 0.006897
|
from django.conf.urls.defaults import *
urlpatterns =
|
patterns('mmda.tags.views',
url(r'^(?P<tag_id>.+?)/$'
|
, 'show_tag', name='show-tag')
)
|
MariaSolovyeva/inasafe
|
safe/impact_functions/generic/continuous_hazard_population/impact_function.py
|
Python
|
gpl-3.0
| 10,346
| 0
|
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool by AusAid - **Generic Impact Function
on Population for Continuous Hazard.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
.. todo:: Check raster is single band
"""
import numpy
from safe.impact_functions.generic\
.continuous_hazard_population.metadata_definitions import \
ContinuousHazardPopulationMetadata
from safe.impact_functions.bases.continuous_rh_continuous_re import \
ContinuousRHContinuousRE
from safe.impact_functions.impact_function_manager import ImpactFunctionManager
from safe.impact_functions.core import (
evacuated_population_needs,
population_rounding,
has_no_data)
from safe.storage.raster import Raster
from safe.utilities.i18n import tr
from safe.common.utilities import format_int
from safe.common.tables import Table, TableRow
from safe.common.utilities import create_classes, create_label, humanize_class
from safe.common.exceptions import (
FunctionParametersError, ZeroImpactException)
from safe.gui.tools.minimum_needs.needs_profile import add_needs_parameters, \
filter_needs_parameters
__author__ = 'lucernae'
__date__ = '24/03/15'
__revision__ = '$Format:%H$'
__copyright__ = ('Copyright 2014, Australia Indonesia Facility for '
'Disaster Reduction')
class Contin
|
uousHazardPopulationFunction(ContinuousRHContinuousRE):
# noinspection PyUnresolvedRef
|
erences
"""Plugin for impact of population as derived by continuous hazard."""
_metadata = ContinuousHazardPopulationMetadata()
def __init__(self):
super(ContinuousHazardPopulationFunction, self).__init__()
self.impact_function_manager = ImpactFunctionManager()
# AG: Use the proper minimum needs, update the parameters
self.parameters = add_needs_parameters(self.parameters)
def _tabulate(
self,
high,
low,
medium,
question,
total_impact):
# Generate impact report for the pdf map
table_body = [
question,
TableRow([tr('People impacted '),
'%s' % format_int(total_impact)],
header=True),
TableRow([tr('People in high hazard area '),
'%s' % format_int(high)],
header=True),
TableRow([tr('People in medium hazard area '),
'%s' % format_int(medium)],
header=True),
TableRow([tr('People in low hazard area'),
'%s' % format_int(low)],
header=True)]
return table_body
def _tabulate_notes(
self,
minimum_needs,
table_body,
total,
total_impact,
no_data_warning):
# Extend impact report for on-screen display
table_body.extend([
TableRow(tr('Notes'), header=True),
tr('Map shows population count in high, medium, and low hazard '
'area.'),
tr('Total population: %s') % format_int(total),
TableRow(tr(
'Table below shows the minimum needs for all '
'affected people'))])
if no_data_warning:
table_body.extend([
tr('The layers contained `no data`. This missing data was '
'carried through to the impact layer.'),
tr('`No data` values in the impact layer were treated as 0 '
'when counting the affected or total population.')
])
total_needs = evacuated_population_needs(
total_impact, minimum_needs)
for frequency, needs in total_needs.items():
table_body.append(TableRow(
[
tr('Needs should be provided %s' % frequency),
tr('Total')
],
header=True))
for resource in needs:
table_body.append(TableRow([
tr(resource['table name']),
format_int(resource['amount'])]))
return table_body, total_needs
def run(self):
"""Plugin for impact of population as derived by continuous hazard.
Hazard is reclassified into 3 classes based on the extrema provided
as impact function parameters.
Counts number of people exposed to each category of the hazard
:returns:
Map of population exposed to high category
Table with number of people in each category
"""
self.validate()
self.prepare()
thresholds = [
p.value for p in self.parameters['Categorical thresholds'].value]
# Thresholds must contain 3 thresholds
if len(thresholds) != 3:
raise FunctionParametersError(
'The thresholds must consist of 3 values.')
# Thresholds must monotonically increasing
monotonically_increasing_flag = all(
x < y for x, y in zip(thresholds, thresholds[1:]))
if not monotonically_increasing_flag:
raise FunctionParametersError(
'Each threshold should be larger than the previous.')
# The 3 categories
low_t = thresholds[0]
medium_t = thresholds[1]
high_t = thresholds[2]
# Extract data as numeric arrays
hazard_data = self.hazard.layer.get_data(nan=True) # Category
no_data_warning = False
if has_no_data(hazard_data):
no_data_warning = True
# Calculate impact as population exposed to each category
exposure_data = self.exposure.layer.get_data(nan=True, scaling=True)
if has_no_data(exposure_data):
no_data_warning = True
# Make 3 data for each zone. Get the value of the exposure if the
# exposure is in the hazard zone, else just assign 0
low_exposure = numpy.where(hazard_data < low_t, exposure_data, 0)
medium_exposure = numpy.where(
(hazard_data >= low_t) & (hazard_data < medium_t),
exposure_data, 0)
high_exposure = numpy.where(
(hazard_data >= medium_t) & (hazard_data <= high_t),
exposure_data, 0)
impacted_exposure = low_exposure + medium_exposure + high_exposure
# Count totals
total = int(numpy.nansum(exposure_data))
low_total = int(numpy.nansum(low_exposure))
medium_total = int(numpy.nansum(medium_exposure))
high_total = int(numpy.nansum(high_exposure))
total_impact = high_total + medium_total + low_total
# Check for zero impact
if total_impact == 0:
table_body = [
self.question,
TableRow(
[tr('People impacted'),
'%s' % format_int(total_impact)], header=True)]
message = Table(table_body).toNewlineFreeString()
raise ZeroImpactException(message)
# Don't show digits less than a 1000
total = population_rounding(total)
total_impact = population_rounding(total_impact)
low_total = population_rounding(low_total)
medium_total = population_rounding(medium_total)
high_total = population_rounding(high_total)
minimum_needs = [
parameter.serialize() for parameter in
filter_needs_parameters(self.parameters['minimum needs'])
]
table_body = self._tabulate(
high_total, low_total, medium_total, self.question, total_impact)
impact_table = Table(table_body).toNewlineFreeString()
table_body, total_needs = self._tabulate_notes(
minimum_needs, table_body, total, total_impact, no_data_warning)
impact_summary = Table(table_body).toNewlineFreeString()
map_title = tr('People in each hazard areas (low, medium, hig
|
Laurawly/tvm-1
|
tests/python/contrib/test_cutlass.py
|
Python
|
apache-2.0
| 18,515
| 0.002268
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import math
import pytest
import tvm
from tvm import relay
import numpy as np
from tvm.runtime.vm import VirtualMachine
from tvm.relay.op.contrib.cutlass import partition_for_cutlass
from tvm.contrib.cutlass import (
tune_cutlass_kernels,
build_cutlass_kernels,
build_cutlass_kernels_vm,
)
logging.basicConfig(level=logging.INFO)
def has_cublas():
return tvm.get_global_func("tvm.contrib.cublas.matmul", True) != None
def has_cutlass():
return tvm.get_global_func("relay.ext.cutlass", True) != None
def get_ref_rt_mod(mod, params, target="cuda"):
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params=params)
dev = tvm.device(target, 0)
rt_mod = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
return rt_mod, dev
def get_ref_vm(mod, params, target="cuda"):
with tvm.transform.PassContext(opt_level=3):
vm_exec = relay.vm.compile(mod, target=target, params=params)
code, lib = vm_exec.save()
dev = tvm.device(target, 0)
vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib)
return VirtualMachine(vm_exec, dev), dev
def get_output(rt_mod, names, inputs):
for name, inp in zip(names, inputs):
rt_mod.set_input(name
|
, inp)
rt_mod.run()
return rt_mod.get_output(0).asnumpy()
def get_output_vm(vm, names, in
|
puts):
params = dict(zip(names, inputs))
return vm.invoke("main", **params).numpy()
def get_dense_with_shape(data_shape, weight_shape, out_dtype="float16"):
data = relay.var("data", shape=data_shape, dtype="float16")
weight = relay.var("weight", shape=weight_shape, dtype="float16")
return relay.nn.dense(data, weight, out_dtype=out_dtype)
def get_dense(M, N, K, out_dtype="float16"):
return get_dense_with_shape((M, K), (N, K), out_dtype)
def get_dense_bias(M, N, K, out_dtype="float16"):
dense = get_dense(M, N, K, out_dtype=out_dtype)
bias = relay.var("bias", shape=(N,), dtype=out_dtype)
return relay.nn.bias_add(dense, bias)
def get_dense_bias_relu(M, N, K, out_dtype="float16"):
return relay.nn.relu(get_dense_bias(M, N, K, out_dtype=out_dtype))
def get_dense_bias_gelu(M, N, K, out_dtype="float16"):
bias_add = get_dense_bias(M, N, K, out_dtype)
mul = bias_add * relay.const((1.0 / math.sqrt(2.0)), dtype=out_dtype)
if out_dtype == "float16":
erf = relay.cast(relay.op.erf(relay.cast(mul, "float32")), "float16")
else:
erf = relay.op.erf(mul)
mul_half = erf * relay.const(0.5, dtype=out_dtype)
add = mul_half + relay.const(0.5, dtype=out_dtype)
return add * bias_add
def get_batch_matmul_with_shape(x_shape, y_shape, out_dtype="float16"):
x = relay.var("x", shape=x_shape, dtype="float16")
y = relay.var("y", shape=y_shape, dtype="float16")
return relay.nn.batch_matmul(x, y, out_dtype=out_dtype)
def get_batch_matmul(batch, M, N, K, out_dtype="float16"):
return get_batch_matmul_with_shape((batch, M, K), (batch, N, K), out_dtype="float16")
def get_conv2d_nchw(d_shape, w_shape, padding, out_dtype="float16"):
data = relay.var("data", shape=d_shape, dtype="float16")
weight = relay.var("weight", shape=w_shape, dtype="float16")
out_channel = w_shape[0]
return relay.nn.conv2d(
data=data,
weight=weight,
kernel_size=w_shape[2:],
channels=out_channel,
padding=padding,
out_dtype=out_dtype,
)
def get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype="float16"):
conv2d = get_conv2d_nchw(d_shape, w_shape, padding, out_dtype=out_dtype)
bias = relay.var("bias", shape=(w_shape[0],), dtype=out_dtype)
return relay.nn.bias_add(conv2d, bias)
def silu(x):
return x * relay.sigmoid(x)
def hardswish(x, out_dtype="float16"):
return x * (
relay.clip(x + relay.const(3, dtype=out_dtype), a_min=0, a_max=6)
/ relay.const(6, dtype=out_dtype)
)
def get_conv2d_nchw_bias_relu(d_shape, w_shape, padding, out_dtype="float16"):
return relay.nn.relu(get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype))
def get_conv2d_nchw_bias_sigmoid(d_shape, w_shape, padding, out_dtype="float16"):
return relay.sigmoid(get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype))
def get_conv2d_nchw_bias_silu(d_shape, w_shape, padding, out_dtype="float16"):
conv_out = get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype)
return silu(conv_out)
def get_conv2d_nchw_bias_hardswish(d_shape, w_shape, padding, out_dtype="float16"):
conv_out = get_conv2d_nchw_bias(d_shape, w_shape, padding, out_dtype=out_dtype)
return hardswish(conv_out, out_dtype)
def get_conv2d_nchw_bias_residual(d_shape, w_shape, padding, out_dtype="float16"):
data = relay.var("data", shape=d_shape, dtype="float16")
weight = relay.var("weight", shape=w_shape, dtype="float16")
bias = relay.var("bias", shape=(w_shape[0],), dtype=out_dtype)
out_channel = w_shape[0]
conv2d = relay.nn.conv2d(
data=data,
weight=weight,
kernel_size=w_shape[2:],
channels=out_channel,
padding=padding,
out_dtype=out_dtype,
)
bias_add = relay.nn.bias_add(conv2d, bias)
return bias_add, data
def profile_and_build(mod, params, sm, tmp_dir="./tmp", lib_path="compile.so", use_fast_math=False):
mod = partition_for_cutlass(mod)
mod, num_cutlass_partition = tune_cutlass_kernels(
mod, sm, profile_all=False, use_multiprocessing=False, tmp_dir=tmp_dir
)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target="cuda", params=params)
lib = build_cutlass_kernels(lib, sm, tmp_dir, lib_path, use_fast_math=use_fast_math)
dev = tvm.device("cuda", 0)
rt_mod = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
return rt_mod, dev, num_cutlass_partition
def profile_and_build_vm(
mod,
params,
sm,
tmp_dir="./tmp",
lib_path="compile.so",
vmcode_path="vmcode.ro",
use_fast_math=False,
):
mod = partition_for_cutlass(mod)
mod, num_cutlass_partition = tune_cutlass_kernels(mod, sm, tmp_dir=tmp_dir)
with tvm.transform.PassContext(opt_level=3):
vm_exec = relay.vm.compile(mod, target="cuda", params=params)
vm_exec = build_cutlass_kernels_vm(
vm_exec, sm, tmp_dir, lib_path, vmcode_path, use_fast_math=use_fast_math
)
dev = tvm.device("cuda", 0)
return VirtualMachine(vm_exec, dev), dev, num_cutlass_partition
def verify_dense(
func, M, N, K, ref_target="cuda", sm=80, atol=1e-5, rtol=1e-5, run_benchmark=False
):
if not has_cutlass():
return
mod = tvm.IRModule.from_expr(func)
typ = relay.transform.InferType()(mod)["main"].body.checked_type
out_dtype = typ.dtype
use_vm = any(isinstance(s, tvm.tir.Any) for s in typ.shape)
np_data = np.random.uniform(-1, 1, (M, K)).astype("float16")
np_weight = np.random.uniform(-1, 1, (N, K)).astype("float16")
np_bias = np.random.uniform(-1, 1, (N,)).astype(out_dtype)
params = {"weight": np_weight, "bias": np_bias}
if use_vm:
if ref_target == "cuda" and out_dtype == "float16":
# Uncomment "return" below to see the accuracy difference of static vs dynamic TVM native fp16 dense
# The static one can use a tensorcore schedule, but the dynami
|
rafasashi/userinfuser
|
serverside/fantasm/utils.py
|
Python
|
gpl-3.0
| 4,909
| 0.006926
|
""" Fantasm: A taskqueue-based Finite State Machine for App Engine Python
Docs and examples: http://code.google.com/p/fantasm/
Copyright 2010 VendAsta Technologies Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the Licen
|
se for the specific language governing permissions and
limitations under the License.
"""
from fantasm import constants
from google.appengine.api.taskqueue.taskqueue import Queue
class NoOpQueue( Queue ):
""" A Queue instance that does not Queue """
def add(self, task, transactional=False):
""" see taskqueue.Queue.add """
pass
def knuthHash(number):
"""A decent hash fu
|
nction for integers."""
return (number * 2654435761) % 2**32
def boolConverter(boolStr):
""" A converter that maps some common bool string to True """
return {'1': True, 'True': True, 'true': True}.get(boolStr, False)
def outputAction(action):
""" Outputs the name of the action
@param action: an FSMAction instance
"""
if action:
return str(action.__class__.__name__).split('.')[-1]
def outputTransitionConfig(transitionConfig):
""" Outputs a GraphViz directed graph node
@param transitionConfig: a config._TransitionConfig instance
@return: a string
"""
label = transitionConfig.event
if transitionConfig.action:
label += '/ ' + outputAction(transitionConfig.action)
return '"%(fromState)s" -> "%(toState)s" [label="%(label)s"];' % \
{'fromState': transitionConfig.fromState.name,
'toState': transitionConfig.toState.name,
'label': label}
def outputStateConfig(stateConfig, colorMap=None):
""" Outputs a GraphViz directed graph node
@param stateConfig: a config._StateConfig instance
@return: a string
"""
colorMap = colorMap or {}
actions = []
if stateConfig.entry:
actions.append('entry/ %(entry)s' % {'entry': outputAction(stateConfig.entry)})
if stateConfig.action:
actions.append('do/ %(do)s' % {'do': outputAction(stateConfig.action)})
if stateConfig.exit:
actions.append('exit/ %(exit)s' % {'exit': outputAction(stateConfig.exit)})
label = '%(stateName)s|%(actions)s' % {'stateName': stateConfig.name, 'actions': '\\l'.join(actions)}
if stateConfig.continuation:
label += '|continuation = True'
if stateConfig.fanInPeriod != constants.NO_FAN_IN:
label += '|fan in period = %(fanin)ds' % {'fanin': stateConfig.fanInPeriod}
shape = 'Mrecord'
if colorMap.get(stateConfig.name):
return '"%(stateName)s" [style=filled,fillcolor="%(fillcolor)s",shape=%(shape)s,label="{%(label)s}"];' % \
{'stateName': stateConfig.name,
'fillcolor': colorMap.get(stateConfig.name, 'white'),
'shape': shape,
'label': label}
else:
return '"%(stateName)s" [shape=%(shape)s,label="{%(label)s}"];' % \
{'stateName': stateConfig.name,
'shape': shape,
'label': label}
def outputMachineConfig(machineConfig, colorMap=None, skipStateNames=None):
""" Outputs a GraphViz directed graph of the state machine
@param machineConfig: a config._MachineConfig instance
@return: a string
"""
skipStateNames = skipStateNames or ()
lines = []
lines.append('digraph G {')
lines.append('label="%(machineName)s"' % {'machineName': machineConfig.name})
lines.append('labelloc="t"')
lines.append('"__start__" [label="start",shape=circle,style=filled,fillcolor=black,fontcolor=white,fontsize=9];')
lines.append('"__end__" [label="end",shape=doublecircle,style=filled,fillcolor=black,fontcolor=white,fontsize=9];')
for stateConfig in machineConfig.states.values():
if stateConfig.name in skipStateNames:
continue
lines.append(outputStateConfig(stateConfig, colorMap=colorMap))
if stateConfig.initial:
lines.append('"__start__" -> "%(stateName)s"' % {'stateName': stateConfig.name})
if stateConfig.final:
lines.append('"%(stateName)s" -> "__end__"' % {'stateName': stateConfig.name})
for transitionConfig in machineConfig.transitions.values():
if transitionConfig.fromState.name in skipStateNames or \
transitionConfig.toState.name in skipStateNames:
continue
lines.append(outputTransitionConfig(transitionConfig))
lines.append('}')
return '\n'.join(lines)
|
enthought/etsproxy
|
enthought/plugins/remote_editor/i_remote_shell.py
|
Python
|
bsd-3-clause
| 114
| 0
|
# proxy module
from __future__ import absolute_import
from envisage.plugins.remote_editor.i_remote_shell import
|
*
|
|
ideascube/pibox-installer
|
ansiblecube/tests/conftest.py
|
Python
|
gpl-3.0
| 733
| 0
|
import os
def get_files(extension):
for root, dirnames, filenames in os.walk("."):
for filename in filenames:
if filename.endswith(extension):
yield os.path.join(root, filename)
def get_roles():
return sorted(os.listdir("./roles"))
def pytest_generate_tests(metafunc):
if "jinja2_file" in metafunc
|
.fixturenames:
metafunc.parametrize("jinja2_file", get_files(".j2"))
if "json_file" in metafunc.fixturenames:
metafunc.parametrize("json_file", get_files(".json"))
if "ini_file" in metafunc.fixturenames:
metafunc.parametrize("ini_file", get_files(".fact"))
if "role" in metafunc.
|
fixturenames:
metafunc.parametrize("role", get_roles())
|
oldani/nanodegree-blog
|
app/models/datastore_adapter.py
|
Python
|
mit
| 2,094
| 0
|
from flask_user import DBAdapter
class DataStoreAdapter(DBAdapter):
""" An Wrapper to be use by Flask User to interact with
the database in this case, the DataStore """
def __init__(self, db, objMOdel):
super().__init__(db, objMOdel)
def get_object(self, ObjectClass, pk):
""" Retrieve an single Entity specified by a pk or id. """
return ObjectClass.get(pk)
def find_all_objects(self, ObjectClass, **kwargs):
""" Retrieve all Entity matching all the filters in kwargs. """
# TODO:
# The filters should be case sensitive
for field, value in kwargs.items():
ObjectClass.add_query_filter(field, "=", value)
|
return ObjectClass.fetch()
def find_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first Entity matching the filters in
kwargs or None. """
# TODO:
# The filters should be case sensitive
for field, value in kwargs.items():
ObjectClass.add_query_filter(field, "=", value)
entity = ObjectClass.fetch(limit=1)
return entity
def ifind_first_object(self, ObjectClass, **kwargs):
"
|
"" Retrieve the first Entity matching the filters in
kwargs or None. """
# TODO:
# The filters should be case insensitive
for field, value in kwargs.items():
ObjectClass.add_query_filter(field, "=", value)
entity = ObjectClass.fetch(limit=1)
return entity
def add_object(self, ObjectClass, **kwargs):
""" Create an Entity with the fields specified in kwargs. """
entity = ObjectClass(**kwargs)
entity.put()
return entity
def update_object(self, entity, **kwargs):
""" Update an Entity with the fields specified in kwargs. """
entity.update(**kwargs)
return entity
def delete_object(self, entity):
""" Delete and Entity. """
return entity.delete(entity.id)
def commit(self):
""" Should commit a session connection to the DataStore. """
pass
|
bgruening/gemini
|
gemini/tool_homozygosity_runs.py
|
Python
|
mit
| 7,165
| 0.00321
|
from __future__ import absolute_import
import os
import sys
from collections import defaultdict
from .gemini_constants import *
from . import GeminiQuery
class Site(object):
def __init__(self, row):
self.chrom = row['chrom']
self.end = int(row['end'])
self.gt_type = None
def _prune_run(run):
"""
Prune the current run of genotypes.
Remove genotypes from the left of the first
non-homozygous genotype, since, owing to the
same logic behind run length encoding, those
genotypes cannot be part of a longer run than
we have seen before.
For example:
breaks = * * *
run = H H H h H H H U H H H h H H H H H H
prune_1 = H H H U H H H h H H H H H H
prune_2 = H H H h H H H H H H
prune_3 = H H H H H H
"""
try:
first_het_idx = run.index('H')
except:
first_het_idx = None
try:
first_unk_idx = run.index('U')
except:
first_unk_idx = None
if first_het_idx is not None and first_unk_idx is not None:
idx_of_first_disruption = min(run.index('H'), run.index('U'))
elif first_het_idx is not None:
idx_of_first_disruption = first_het_idx
elif first_unk_idx is not None:
idx_of_first_disruption = first_unk_idx
else:
# no interuptions, return an empty list
return 0, 0, len(run), []
hets_removed = run[0:idx_of_first_disruption+1].count('H')
unks_removed = run[0:idx_of_first_disruption+1].count('U')
homs_removed = idx_of_first_disruption - (hets_removed + unks_removed) + 1
return hets_removed, unks_removed, homs_removed, run[idx_of_first_disruption+1:]
def sweep_genotypes_for_rohs(args, chrom, samples):
"""
Sweep through the genotypes for each sample in search of ROHs.
Note: If the genotype was homozygous, the end position
of the variant is stored. Otherwise 'H' for het
and 'U' for unknown.
"""
hom_count = 0
het_count = 0
unk_count = 0
curr_run = []
for sample in samples:
sites = iter(samples[sample])
for site in sites:
# retain the last homozygote from previous
# run. See function docs for details
if len(curr_run):
hets_removed, unks_removed, homs_removed, curr_run = \
_prune_run(curr_run)
# reset for next run
hom_count -= homs_removed
het_count -= hets_removed
unk_count -= unks_removed
# sweep through the active sites until we encounter
# too many HETS or UNKNOWN genotypes.
while het_count <= args.max_hets and unk_count <= args.max_unknowns:
if site != 'H' and site != 'U':
hom_count +=1
curr_run.append(site)
elif site == 'H':
curr_run.append(site)
het_count += 1
elif site == 'U':
curr_run.append(site)
unk_count += 1
try:
site = next(sites)
except StopIteration:
break
# skip the current run unless it contains enough sites.
if hom_count >= args.min_snps:
run_start = min(c for c in curr_run if c not in ['H', 'U'])
run_end = max(c for c in curr_run if c not in ['H', 'U'])
run_length = run_end - run_start
# report the run if it is long enough.
if run_length >= args.min_size:
density_per_kb = float(len(curr_run) * 1000) / float(run_length)
print("\t".join(str(s) for s in [chrom,
run_start, run_end, sample,
hom_count, round(density_per_kb, 4),
run_length]))
else:
curr_run = []
hom_count = 0
het_count = 0
unk_count = 0
def get_homozygosity_runs(args):
gq = GeminiQuery.GeminiQuery(args.db)
# get a mapping of sample ids to sample indices
idx2smp = gq.index2sample
smp2idx = gq.sample2index
sm_index = []
# prepare a lookup of just the samples
# for which the user wishes to search for ROHs
if args.samples is not None:
sample_filter = args.samples.strip().split(",")
for sample in sample_filter:
try:
idx = smp2idx[sample]
except:
raise ValueError("Sample %s could not be found.\n" \
% (sample))
sm_index.append(smp2idx[sample])
else:
for sample in smp2idx:
sm_index.append(smp2idx[sample])
###########################################################################
# Phase 1. Retrieve the variants for each chrom/sample
###########################################################################
quer
|
y = "SELECT chrom, start, end, gt_types, gt_depths \
FROM variants \
WHERE type = 'snp' \
AND filter is NULL \
AND depth >= " + str(args.min_total_depth) + \
" ORDER BY chrom, end"
sys.stderr.write("LOG: Querying and ordering variants by chromosomal position.\n")
gq.run(query, needs
|
_genotypes=True)
print("\t".join(['chrom',
'start', 'end', 'sample',
'num_of_snps','density_per_kb',
'run_length_in_bp']))
variants_seen = 0
samples = defaultdict(list)
prev_chrom = None
curr_chrom = None
for row in gq:
variants_seen += 1
if variants_seen % 10000 == 0:
sys.stderr.write("LOG: Loaded %d variants. Current variant on %s, position %d.\n" \
% (variants_seen, row['chrom'], row['end']))
gt_types = row['gt_types']
gt_depths = row['gt_depths']
curr_chrom = row['chrom']
# the chromosome has changed. search for ROHs in the previous chrom
if curr_chrom != prev_chrom and prev_chrom is not None:
sweep_genotypes_for_rohs(args, prev_chrom, samples)
samples = defaultdict(list)
# associate the genotype for the variant with each sample
for idx in sm_index:
sample = idx2smp[idx]
gt_type = gt_types[idx]
depth = gt_depths[idx]
# the genotype must have had sufficient depth to be considered
if depth < args.min_genotype_depth:
continue
if (gt_type == HOM_ALT or gt_type == HOM_REF):
samples[sample].append(row['end'])
elif gt_type == HET:
samples[sample].append('H')
elif gt_type == UNKNOWN:
samples[sample].append('U')
prev_chrom = curr_chrom
# search for ROHs in the final chromosome
sweep_genotypes_for_rohs(args, curr_chrom, samples)
def run(parser, args):
if os.path.exists(args.db):
# run the roh caller
get_homozygosity_runs(args)
|
gamesbrewer/kegger
|
kegger/myapp/flaskmyappname/models.py
|
Python
|
cc0-1.0
| 496
| 0.008065
|
from google.appengine.ext import ndb
class Users(ndb.Model):
password = ndb.StringProperty(required=True)
full_name = ndb.StringProperty(required=True)
phone_no = ndb.StringProperty(required=False)
timestamp = ndb.DateTimeProperty(auto_now
|
_add=True)
@classmethod
def query_user(cls, ancestor_key):
return cls.query(ancestor=ancestor_key).order(c
|
ls.full_name)
@classmethod
def user_key(cls, user_email):
return ndb.Key('MyAppName_User', user_email)
|
hanlin-he/UTD
|
leetcode/py/053.py
|
Python
|
mit
| 1,020
| 0.013725
|
# 053. Maximum Subarray
# The simple O(n) solution.
import unittest
class Solution(object):
def maxSubArray(self, nums)
|
:
"""
:type nums: List[int]
:rtype: int
"""
ret = nums[0]
pre = nums[0]
for i in nums[1:]:
if ret < i and ret < 0:
ret = pre = i
continue
cur = pre + i
if ret < cur:
ret = pre = cur
continue
if cur >= 0:
|
pre = cur
continue
# if cur < 0: # Better start over.
pre = 0
return ret
class SolutionUnitTest(unittest.TestCase):
def setup(self):
pass
def tearDown(self):
pass
def testMaxSubArray(self):
s = Solution()
self.assertEqual(s.maxSubArray([-2,1,-3,4,-1,2,1,-5,4]), 6)
self.assertEqual(s.maxSubArray([-2,1]), 1)
self.assertEqual(s.maxSubArray([-1]), -1)
if __name__ == '__main__':
unittest.main()
|
nacl-webkit/chrome_deps
|
tools/telemetry/telemetry/trace_event_importer.py
|
Python
|
bsd-3-clause
| 386
| 0.015544
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
impor
|
t json
from telemetry import timeline_model
def Import(data):
trace = json.loads(data) # pylint: disable=W0612
model = timeli
|
ne_model.TimelineModel()
# TODO(nduca): Actually import things.
return model
|
SEL-Columbia/commcare-hq
|
corehq/apps/adm/reports/__init__.py
|
Python
|
bsd-3-clause
| 6,857
| 0.003354
|
import logging
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from corehq.apps.adm.dispatcher import ADMSectionDispatcher
from corehq.apps.adm.models import REPORT_SECTION_OPTIONS, ADMReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DTSortType
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
from corehq.apps.reports.standard import DatespanMixin, ProjectReportParametersMixin
from dimagi.utils.decorators.memoized import memoized
from django.utils.translation import ugettext as _, ugettext_noop
class ADMSectionView(GenericReportView):
section_name = ugettext_noop("Active Data Management")
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
hide_filters = True
emailable = True
# adm-specific stuff
adm_slug = None
def __init__(self, request, base_context=None, domain=None, **kwargs):
self.adm_sections = dict(REPORT_SECTION_OPTIONS)
if self.adm_slug not in self.adm_sections:
raise ValueError("The adm_slug provided, %s, is not in the list of valid ADM report section slugs: %s." %
(self.adm_slug, ", ".join([key for key, val in self.adm_sections.items()]))
)
self.subreport_slug = kwargs.get("subreport_slug")
super(ADMSectionView, self).__init__(request, base_context, domain=domain, **kwargs)
self.context['report'].update(sub_slug=self.subreport_slug)
if self.subreport_data:
self.name = mark_safe("""%s <small>%s</small>""" %\
(self.subreport_data.get('value', {}).get('name'),
self.adm_sections.get(self.adm_slug, _("ADM Report"))))
@property
def subreport_data(self):
raise NotImplementedError
@property
def default_report_url(self):
return reverse('default_adm_report', args=[self.request.project])
@classmethod
def get_url(cls, domain=None, render_as=None, **kwargs):
subreport = kwargs.get('subreport')
url = super(ADMSectionView, cls).get_url(domain=domain, render_as=render_as, **kwargs)
return "%s%s" % (url, "%s/" % subreport if subreport else "")
class DefaultReportADMSectionView(GenericTabularReport, ADMSectionView, ProjectReportParametersMixin, DatespanMixin):
section_name = ugettext_noop("Active Data Management")
|
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
fix_left_col = True
fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
|
'corehq.apps.reports.filters.select.GroupFilter',
'corehq.apps.reports.filters.dates.DatespanFilter']
hide_filters = False
# adm-specific stuff
adm_slug = None
@property
@memoized
def subreport_data(self):
default_subreport = ADMReport.get_default(self.subreport_slug, domain=self.domain,
section=self.adm_slug, wrap=False)
if default_subreport is None:
return dict()
return default_subreport
@property
@memoized
def adm_report(self):
if self.subreport_data:
try:
adm_report = ADMReport.get_correct_wrap(self.subreport_data.get('key')[-1])
adm_report.set_domain_specific_values(self.domain)
return adm_report
except Exception as e:
logging.error("Could not fetch ADM Report: %s" % e)
return None
@property
@memoized
def adm_columns(self):
if self.adm_report:
column_config = self.report_column_config
if not isinstance(column_config, dict):
ValueError('report_column_config should return a dict')
for col in self.adm_report.columns:
col.set_report_values(**column_config)
return self.adm_report.columns
return []
@property
def headers(self):
if self.subreport_slug is None:
raise ValueError("Cannot render this report. A subreport_slug is required.")
header = DataTablesHeader(DataTablesColumn(_("FLW Name")))
for col in self.adm_report.columns:
sort_type = DTSortType.NUMERIC if hasattr(col, 'returns_numerical') and col.returns_numerical else None
help_text = _(col.description) if col.description else None
header.add_column(DataTablesColumn(_(col.name), sort_type=sort_type, help_text=help_text))
header.custom_sort = self.adm_report.default_sort_params
return header
@property
def rows(self):
rows = []
for user in self.users:
row = [self.table_cell(user.raw_username,
user.username_in_report)]
for col in self.adm_columns:
val = col.raw_value(**user._asdict())
row.append(self.table_cell(col.clean_value(val),
col.html_value(val)))
rows.append(row)
self.statistics_rows = [["Total"], ["Average"]]
for ind, col in enumerate(self.adm_columns):
column_data = [row[1+ind] for row in rows]
self.statistics_rows[0].append(col.calculate_totals(column_data))
self.statistics_rows[1].append(col.calculate_averages(column_data))
return rows
@property
def report_column_config(self):
"""
Should return a dict of values important for rendering the ADMColumns in this report.
"""
return dict(
domain=self.domain,
datespan=self.datespan
)
@classmethod
def override_navigation_list(cls, context):
current_slug = context.get('report', {}).get('sub_slug')
domain = context.get('domain')
subreport_context = []
subreports = ADMReport.get_default_subreports(domain, cls.adm_slug)
if not subreports:
subreport_context.append({
'url': '#',
'warning_label': 'No ADM Reports Configured',
})
return subreport_context
for report in subreports:
key = report.get("key", [])
entry = report.get("value", {})
report_slug = key[-2]
if cls.show_subreport_in_navigation(report_slug):
subreport_context.append({
'is_active': current_slug == report_slug,
'url': cls.get_url(domain=domain, subreport=report_slug),
'description': entry.get('description', ''),
'title': entry.get('name', 'Untitled Report'),
})
return subreport_context
@classmethod
def show_subreport_in_navigation(cls, subreport_slug):
return True
|
Z2PackDev/TBmodels
|
tests/test_constructors.py
|
Python
|
apache-2.0
| 8,064
| 0.00124
|
#!/usr/bin/env python
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Test the Model constructors.
"""
import itertools
import pytest
import numpy as np
import tbmodels
def test_on_site_too_long(get_model):
"""
Check that error is raised when the on_site list is too long.
"""
with pytest.raises(ValueError):
get_model(0.1, 0.2, on_site=[1, 2, 3])
def test_no_size_given(get_model, models_equal):
"""
Check that the Model can be created without explicit size,
"""
model1 = get_model(0.1, 0.2, size=None)
model2 = get_model(0.1, 0.2)
models_equal(model1, model2)
def test_size_from_hop():
"""
Check that the size can be obtained from hopping matrices.
"""
model = tbmodels.Model(hop={(0, 0, 0): np.zeros((4, 4))})
assert model.size == 4
def test_size_unknown(get_model):
"""
Check that an error is raised when the size can not be inferred.
"""
with pytest.raises(ValueError):
get_model(0.1, 0.2, size=None, on_site=None, pos=None)
def test_add_on_site(get_model, models_equal):
"""
Check that adding on-site interaction in the constructor has the
same as effect as adding it after construction.
"""
model1 = get_model(0.1, 0.2, on_site=(1, -2))
model2 = get_model(0.1, 0.2, size=2, on_site=None)
model2.add_on_site((1, -2))
models_equal(model1, model2)
def test_invalid_add_on_site(get_model):
"""
Check that an error is raised when trying to add a list of on-site
interactions that is too long to an existing model.
"""
model = get_model(0.1, 0.2)
with pytest.raises(ValueError):
model.add_on_site((1, 2, 3))
def test_explicit_dim(get_model, models_equal):
"""
Check that explicitly setting the dimension does not change the model.
"""
model1 = get_model(0.1, 0.2, dim=3)
model2 = get_model(0.1, 0.2)
models_equal(model1, model2)
def test_no_dim(get_model):
"""Check that an error is raised when the dimension can not be inferred."""
with pytest.raises(ValueError):
get_model(0.1, 0.2, pos=None)
def test_dim_from_uc():
"""Check that the dimension can be inferred from the unit cell."""
model = tbmodels.Model(uc=((1, 0), (0, 1)), size=5)
assert model.dim == 2
def test_pos_outside_uc(get_model, models_equal):
"""Check that positions outside the UC are mapped back inside."""
model1 = get_model(0.1, 0.2, pos=((0.0, 0.0, 0.0), (-0.5, -0.5, 0.0)))
model2 = get_model(0.1, 0.2)
models_equal(model1, model2)
@pytest.mark.parametrize("sparse", [True, False])
def test_from_hop_list(get_model, models_equal, sparse):
"""
Check the 'from_hop_list' constructor.
"""
t1 = 0.1
t2 = 0.2
hoppings = []
for phase, R in zip([1, -1j, 1j, -1], itertools.product([0, -1], [0, -1], [0])):
hoppings.append([t1 * phase, 0, 1, R])
for R in ((r[0], r[1], 0) for r in itertools.permutations([0, 1])):
hoppings.append([t2, 0, 0, R])
hoppings.append([-t2, 1, 1, R])
model1 = tbmodels.Model.from_hop_list(
hop_list=hoppings,
contains_cc=False,
on_site=(1, -1),
occ=1,
pos=((0.0,) * 3, (0.5, 0.5, 0.0)),
sparse=sparse,
)
model2 = get_model(t1, t2, sparse=sparse)
models_equal(model1, model2)
@pytest.mark.parametrize("sparse", [True, False])
def test_from_hop_list_with_cc(get_model, models_close, sparse):
"""
Check the 'from_hop_list' constructor, where complex conjugate terms
are included in the list.
"""
t1 = 0.1
t2 = 0.2
hoppings = []
for phase, R in zip([1, -1j, 1j, -1], itertools.product([0, -1], [0, -1], [0])):
hoppings.append([t1 * phase, 0, 1, R])
for phase, R in zip([1, -1j, 1j, -1], itertools.product([0, -1], [0, -1], [0])):
hoppings.append([np.conjugate(t1 * phase), 1, 0, tuple(-x for x in R)])
for R in ((r[0], r[1], 0) for r in itertools.permutations([0, 1])):
hoppings.append([t2, 0, 0, R])
hoppings.append([t2, 0, 0, tuple(-x for x in R)])
hoppings.append([-t2, 1, 1, R])
hoppings.append([-t2, 1, 1, tuple(-x for x in R)])
model1 = tbmodels.Model.from_hop_list(
hop_list=hoppings,
contains_cc=True,
on_site=(1, -1),
occ=1,
pos=((0.0,) * 3, (0.5, 0.5, 0.0)),
sparse=sparse,
)
model2 = get_model(t1, t2, sparse=sparse)
models_close(model1, model2)
@pytest.mark.parametrize("sparse", [True, False])
def test_pos_outside_uc_with_hoppings(
get_model, models_equal, sparse
): # pylint: disable=invalid-name
"""
Check the 'from_hop_list' constructor with positions outside of the UC.
"""
t1 = 0.1
t2 = 0.2
hoppings = []
for phase, R in zip([1, -1j, 1j, -1], [(1, 1, 0), (1, 0, 0), (0, 1, 0), (0, 0, 0)]):
hoppings.append([t1 * phase, 0, 1, R])
for R in ((r[0], r[1], 0) for r in itertools.permutations([0, 1])):
hoppings.append([t2, 0, 0, R])
hoppings.append([-t2, 1, 1, R])
model1 = tbmodels.Model.from_hop_list(
hop_list=hoppings,
contains_cc=False,
on_site=(1, -1),
occ=1,
pos=((0.0,) * 3, (-0.5, -0.5, 0.0)),
sparse=sparse,
)
model2 = get_model(t1, t2, sparse=sparse)
models_equal(model1, model2)
def test_invalid_hopping_matrix():
"""
Check that an error is raised when the passed size does not match the
shape of hopping matrices.
"""
with pytest.raises(ValueError):
tbmodels.Model(size=2, hop={(0, 0, 0): np.eye(4)})
def test_non_hermitian_1():
"""
Check that an error is raised when the given hoppings do not correspond
to a hermitian Hamiltonian.
"""
with pytest.raises(ValueError):
tbmodels.Model(size=2, hop={(0, 0, 0): np.eye(2), (1, 0, 0): np.eye(2)})
def test_non_hermitian_2():
"""
Check that
|
an error is raised when the given hoppings do not correspond
to a hermitian Hamiltonian.
"""
with pytest.raises(ValueError):
tbmodels.Model(
size=2,
hop={(0, 0, 0): np.eye(2), (1, 0, 0): np.eye(2), (-1, 0, 0): 2 * np.eye(2)},
)
def test_wrong_key_length():
"""
Check that an error is raised when the reciprocal lattice vectors
have inconsistent lengths.
"""
with pytest.raises(ValueError):
tbmodels.Model(
|
size=2,
hop={(0, 0, 0): np.eye(2), (1, 0, 0): np.eye(2), (-1, 0, 0, 0): np.eye(2)},
contains_cc=False,
)
def test_wrong_pos_length():
"""
Check that an error is raised when the number of positions does not
match the given size.
"""
with pytest.raises(ValueError):
tbmodels.Model(
size=2,
hop={(0, 0, 0): np.eye(2), (1, 0, 0): np.eye(2), (-1, 0, 0): np.eye(2)},
contains_cc=False,
pos=((0.0,) * 3, (0.5,) * 3, (0.2,) * 3),
)
def test_wrong_pos_dim():
"""
Check that an error is raised when the positions have inconsistent
dimensions.
"""
with pytest.raises(ValueError):
tbmodels.Model(
size=2,
hop={(0, 0, 0): np.eye(2), (1, 0, 0): np.eye(2), (-1, 0, 0): np.eye(2)},
contains_cc=False,
pos=((0.0,) * 3, (0.5,) * 4),
)
def test_wrong_uc_shape():
"""
Check that an error is raised when the unit cell is not square.
"""
with pytest.raises(ValueError):
tbmodels.Model(
size=2,
hop={(0, 0, 0): np.eye(2), (1, 0, 0): np.eye(2), (-1, 0, 0): np.eye(2)},
contains_cc=False,
pos=((0.0,) * 3, (0.5,) * 3),
uc=np.array([[1, 2], [3, 4], [5, 6]]),
)
def test_hop_list_no_size():
"""
Check that an error is raised when using 'from_hop_list' and
the size is not known.
"""
with pytest.raises(ValueError):
tbmodels.Model.from_hop_list(hop_list=(1.2, 0, 1, (1, 2, 3)))
|
yephper/django
|
tests/admin_docs/views.py
|
Python
|
bsd-3-clause
| 404
| 0
|
from django.contrib.admindocs.middleware import XViewMiddleware
from django.http import HttpResponse
from django.utils.decorators import decorator_from_middleware
from django.views.generic import View
xview_dec = decorator_from_middleware(XViewMiddleware)
def xvie
|
w(request):
return HttpResponse()
class XViewClas
|
s(View):
def get(self, request):
return HttpResponse()
|
klashxx/PyConES
|
rspace/rspace/docs/conf.py
|
Python
|
mit
| 1,487
| 0.001346
|
# -*- coding: utf-8 -*-
#pylint: skip-file
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('..'))
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.napoleon',
]
autodoc_default_flags = ["members", "show-inheritance"]
autodoc_member_order = "bysource"
templates_path = ['_templates']
source_suffix = '.rst'
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'rspace'
copyright = u'GPL'
version = '0.1'
release = '0.1'
language = 'en'
today_fmt = '%d de %B , %Y'
exclude_patterns = ['_build']
show_authors = True
pygments_style = 'sphinx'
html_title = 'rspace Docs'
html_short_title = 'rspace Docs'
html_last_updated_fmt = '%d de %B , %Y'
html_domain_indices = True
html_show_sourcelink = False
html_show_sphinx = False
html_show_copyright = True
htmlhelp_basename = 'rspacedoc'
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinf
|
o_documents = [
('index'
|
, 'rspace', u'cptab Docs',
u'Juan Diego Godoy Robles', 'rspace', 'PyConES 2016 - Almería',
'Miscellaneous'),
]
|
paddycarey/stretch
|
stretch/exceptions.py
|
Python
|
mit
| 710
| 0
|
class StretchException(Exception):
"""Common base class for all exceptions raised explicitly by stretch.
Exceptions which are subclasses of this type will be handled nicely by
stretch and will not cause the program to exit. Any exceptions raised
|
which are not a subclass of this type will exit(1) and print a traceback
to stdout.
"""
level = "error"
def __init__(self, message, **kwarg
|
s):
Exception.__init__(self, message)
self.message = message
self.kwargs = kwargs
def format_message(self):
return self.message
def __unicode__(self):
return self.message
def __str__(self):
return self.message.encode('utf-8')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.