"""
Wyoming offers XLS files containing precinct-level results for each county for all years except 2006.
Elections in 2012 and 2010 are contained in zip files on the SoS site; these are represented in the dashboard API
as the `direct_links` attribute on elections. Zip files for 2000, 2002, 2004 and 2008 elections were sent by the SoS
and are stored in the https://github.com/openelections/openelections-data-wy repository in the `raw` directory. 2000
files have been converted from the original Quattro Pro source to XLS files.
For 2006, precinct-level results are contained in county-specific PDF files. The CSV versions of those are contained in the
https://github.com/openelections/openelections-data-wy repository. Special elections are also contained in that repository.
"""
from future import standard_library
standard_library.install_aliases()
from os.path import join
import json
import datetime
import urllib.parse
from openelex import PROJECT_ROOT
from openelex.base.datasource import BaseDatasource
from openelex.lib import build_github_url, build_raw_github_url
class Datasource(BaseDatasource):
# PUBLIC INTERFACE
def mappings(self, year=None):
"""Return array of dicts containing source url and
standardized filename for raw results file, along
with other pieces of metadata
"""
mappings = []
for yr, elecs in list(self.elections(year).items()):
mappings.extend(self._build_metadata(yr, elecs))
return mappings
def target_urls(self, year=None):
"Get list of source data urls, optionally filtered by year"
return [item['raw_url'] for item in self.mappings(year)]
def filename_url_pairs(self, year=None):
return [(item['generated_filename'], self._url_for_fetch(item))
for item in self.mappings(year)]
def unprocessed_filename_url_pairs(self, year=None):
    return [(item['generated_filename'].replace(".csv", ".pdf"), item['raw_url'])
            for item in self.mappings(year)
            if item.get('pre_processed_url')]  # not every mapping carries this key
def mappings_for_url(self, url):
return [mapping for mapping in self.mappings() if mapping['raw_url'] == url]
# PRIVATE METHODS
def _build_metadata(self, year, elections):
meta = []
year_int = int(year)
if year_int > 2008:  # compare numerically; year may arrive as a string
for election in elections:
results = [x for x in self._url_paths() if x['date'] == election['start_date']]
for result in results:
county = [c for c in self._jurisdictions() if c['county'] == result['county']][0]
if year_int > 2010:
generated_filename = self._generate_county_filename(result, election, '.xlsx')
else:
generated_filename = self._generate_county_filename(result, election, '.xls')
meta.append({
"generated_filename": generated_filename,
'raw_url': result['url'],
'raw_extracted_filename': result['raw_extracted_filename'],
"ocd_id": county['ocd_id'],
"name": county['county'],
"election": election['slug']
})
elif year_int == 2006:
for election in elections:
results = [x for x in self._url_paths() if x['date'] == election['start_date']]
for result in results:
county = [c for c in self._jurisdictions() if c['county'] == result['county']][0]
generated_filename = self._generate_county_filename(result, election, '.csv')
meta.append({
"generated_filename": generated_filename,
"pre_processed_url": build_github_url(self.state, generated_filename),
"raw_url": result['url'],
"ocd_id": county['ocd_id'],
"name": county['county'],
"election": election['slug']
})
else:
for election in elections:
results = [x for x in self._url_paths() if x['date'] == election['start_date']]
for result in results:
county = [c for c in self._jurisdictions() if c['county'] == result['county']][0]
if result['special'] and result['raw_extracted_filename']:
generated_filename = '20021126__wy__special__general__natrona__state_house__36__precinct.xls'
raw_url = build_raw_github_url(self.state, election['start_date'].replace('-',''), result['raw_extracted_filename'])
pre_processed_url = ''
elif result['special']:
generated_filename = result['path']
raw_url = result['url']
pre_processed_url = ''
elif result['raw_extracted_filename'] != '':
generated_filename = self._generate_county_filename(result, election, '.' + result['raw_extracted_filename'].split('.')[1])
pre_processed_url = build_raw_github_url(self.state, election['start_date'].replace('-',''), result['raw_extracted_filename'])
raw_url = ''
else:
generated_filename = self._generate_county_filename(result, election, '.xls')
raw_url = build_raw_github_url(self.state, election['start_date'].replace('-',''), result['raw_extracted_filename'])
pre_processed_url = ''
meta.append({
"generated_filename": generated_filename,
"pre_processed_url": pre_processed_url,
"raw_url": raw_url,
"ocd_id": county['ocd_id'],
"name": county['county'],
"election": election['slug']
})
return meta
def _generate_county_filename(self, result, election, extension):
    # The general and no-party cases produce identical filenames, so they
    # share one branch; `extension` avoids shadowing the builtin `format`.
    if election['race_type'] == 'general' or result['party'] == '':
        bits = [
            election['start_date'].replace('-',''),
            self.state.lower(),
            election['race_type'],
            result['county'].lower().replace(' ','_'),
            'precinct'
        ]
    else:
        bits = [
            election['start_date'].replace('-',''),
            self.state.lower(),
            result['party'].lower(),
            election['race_type'],
            result['county'].lower().replace(' ','_'),
            'precinct'
        ]
    return "__".join(bits) + extension
def _jurisdictions(self):
"""Wyoming counties"""
m = self.jurisdiction_mappings()
mappings = [x for x in m if x['county'] != ""]
return mappings
def _url_for_fetch(self, mapping):
if mapping['raw_url']:
return mapping['raw_url']
else:
return mapping['pre_processed_url']
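# Minimal usage sketch (illustrative only; assumes the openelex metadata
# files this class reads via _url_paths() and jurisdiction_mappings()):
#
#     ds = Datasource()
#     for m in ds.mappings(2012)[:3]:
#         print(m['generated_filename'], '->', m['raw_url'])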
| {
"content_hash": "af4bef561cbd288c2d7d0c56d3143e67",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 150,
"avg_line_length": 47.98076923076923,
"alnum_prop": 0.5370741482965932,
"repo_name": "openelections/openelections-core",
"id": "3acacb1a521e951de90c3fdb61e7532de84eca33",
"size": "7485",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "openelex/us/wy/datasource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "57395"
},
{
"name": "Python",
"bytes": "949426"
}
],
"symlink_target": ""
} |
"""
Django settings for es-graphite-shim project
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
import os
BASE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
try:
    from local_settings import *
except Exception:
    # local_settings must define HOSTNAME, ES_HOST, ES_PORT, INDEX_PREFIX,
    # DOC_TYPE, FIELD and FRESH (all referenced below); fail loudly if absent.
    raise
import socket
hostname = socket.gethostname()
del socket
# SECURITY WARNING: don't run with debug turned on in production!
if hostname == HOSTNAME:
DEBUG = False
PRODUCTION = True
else:
# All other environments are assumed to be non-production
# environments. You can override these settings here to test out
# production-like behaviors.
DEBUG = True
PRODUCTION = False
TEMPLATE_DEBUG = True
# FIXME: If you want to test development mode with ./manage.py runserver as if
# it would be production, you need to add 'localhost' or '*' to the
# ALLOWED_HOSTS list, set DEBUG above to False, and you need to use the
# --insecure switch on runserver (e.g. $ python3 ./manage.py runserver
# --insecure).
if DEBUG:
ALLOWED_HOSTS = ['']
elif PRODUCTION:
# ALLOWED_HOSTS = [HOSTNAME,]
ALLOWED_HOSTS = ['*']
else:
ALLOWED_HOSTS = ['localhost',]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'es-graphite-shim.urls'
WSGI_APPLICATION = 'es-graphite-shim.wsgi.application'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {}
# DATABASES = {
# 'default': {
# 'NAME': os.path.join(BASE_DIR, 'storage/%s.sqlite3' % (DB_NAME)),
# 'ENGINE': 'django.db.backends.sqlite3',
# 'USER': 'apache',
# 'PASSWORD': DB_PASS,
# 'HOST': '',
# 'PORT': ''
# }
# }
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
# FIXME: either remove, or configure based on local settings
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_ROOT = '/mnt/static'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates/'),)
# Initiate ElasticSearch connection
from elasticsearch import Elasticsearch #, client
from urllib3 import Timeout
timeoutobj = Timeout(total=1200, connect=10, read=600)
from time import ctime
print("[%s] - Initiating ES connection" % (ctime()))
ES = Elasticsearch(host=ES_HOST, port=ES_PORT,
timeout=timeoutobj, max_retries=0)
print("[%s] - Established ES connection" % (ctime()))
# get the data to be displayed in drop down list in grafana
from lib.get_es_metadata import get_fieldnames as _get_fieldnames
from lib.get_es_metadata import get_open_indices_list as _get_open_indices_list
# query list of indices in state:open
import json as _js
_indices_path = os.path.join(BASE_DIR, 'lib/mappings/open_indices.json')
try:
    if not os.path.exists(_indices_path):
        _OPEN_INDICES = _get_open_indices_list(ES, INDEX_PREFIX, DOC_TYPE)
        # dict with index name as key and fieldnames as values
        with open(_indices_path, 'wb') as f:
            f.write(_js.dumps(_OPEN_INDICES).encode('UTF-8'))
    else:
        with open(_indices_path, 'rb') as f:
            _OPEN_INDICES = _js.loads(f.read().decode('UTF-8'))
except Exception as e:
    quit("[%s] - ERROR: %s" % (ctime(), e))
print("[%s] - # of Open Indices: %d" % (ctime(), len(_OPEN_INDICES)))
_FIELDS = _get_fieldnames(ES, FIELD, _OPEN_INDICES, doc_type=DOC_TYPE)
# remove methods which won't be used any longer
del _get_fieldnames
del _get_open_indices_list
# build an aggregate dict of mappings to be referred
# for field validation each time a query is issued
from lib.get_es_metadata import get_mappings as _get_mappings
_MAPPINGS = _get_mappings(ES, DOC_TYPE, _fresh=FRESH)
del _get_mappings
del _js
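# A hypothetical minimal local_settings.py for this shim (names inferred from
# their uses above; the values are placeholders, not project defaults):
#
#     HOSTNAME = 'shim-prod.example.com'  # host treated as production above
#     ES_HOST = 'localhost'
#     ES_PORT = 9200
#     INDEX_PREFIX = 'logstash-'
#     DOC_TYPE = 'doc'
#     FIELD = 'metric'   # field whose names feed the Grafana drop-down
#     FRESH = False      # passed through to get_mappings(_fresh=...)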
| {
"content_hash": "480b2dfdd19e398a19f089fe014e7e1a",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 79,
"avg_line_length": 29.806818181818183,
"alnum_prop": 0.6995806328631338,
"repo_name": "distributed-system-analysis/es-graphite-shim",
"id": "45ae87f56f0fa4cc1032fd34a63a5b086f5f43d8",
"size": "5268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "es-graphite-shim/es-graphite-shim/settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2203"
},
{
"name": "HTML",
"bytes": "1874"
},
{
"name": "Python",
"bytes": "24975"
}
],
"symlink_target": ""
} |
"""
Tests - Deuce Client - Common - Validation - Instances
"""
import mock
from unittest import TestCase
from stoplight import validate
import deuceclient.api as api
import deuceclient.common.validation_instance as val_instance
import deuceclient.common.errors as errors
from deuceclient.tests import *
from deuceclient.utils import UniformSplitter
class ValidationInstanceTests(TestCase):
def setUp(self):
super(ValidationInstanceTests, self).setUp()
self.project_id = create_project_name()
self.vault_id = create_vault_name()
self.block = create_block()
self.storage_id = create_storage_block()
def tearDown(self):
super(ValidationInstanceTests, self).tearDown()
def test_project_instance(self):
project = api.Project(self.project_id)
@validate(value=val_instance.ProjectInstanceRule)
def check_project(value):
return True
self.assertTrue(check_project(project))
with self.assertRaises(errors.InvalidProjectInstance):
check_project(project.project_id)
def test_vault_instance(self):
vault = api.Vault(self.project_id, self.vault_id)
@validate(value=val_instance.VaultInstanceRule)
def check_vault(value):
return True
self.assertTrue(check_vault(vault))
with self.assertRaises(errors.InvalidVaultInstance):
check_vault(vault.vault_id)
def test_block_instance(self):
block = api.Block(self.project_id, self.vault_id, self.block[0])
@validate(value=val_instance.BlockInstanceRule)
def check_block(value):
return True
self.assertTrue(check_block(block))
with self.assertRaises(errors.InvalidBlockInstance):
check_block(block.block_id)
def test_file_splitter_instance(self):
reader = make_reader(100, null_data=True)
splitter = UniformSplitter(self.project_id, self.vault_id, reader)
@validate(value=val_instance.FileSplitterInstanceRule)
def check_splitter(value):
return True
self.assertTrue(check_splitter(splitter))
with self.assertRaises(errors.InvalidFileSplitterType):
check_splitter(self.project_id)
| {
"content_hash": "c8fdc0c1e1d6d940e73815f87b90ab9b",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 74,
"avg_line_length": 29.776315789473685,
"alnum_prop": 0.6800707026071586,
"repo_name": "rackerlabs/deuce-client",
"id": "d842e283760a49a5f2f10c988e15a05bf75275bc",
"size": "2263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deuceclient/tests/test_common_validation_instance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "362823"
}
],
"symlink_target": ""
} |
"""hypothesis2
This package contains a variety of helpers for property testing using
hypothesis, e.g. additional decorators, strategies, etc.
"""
from tests.hypothesis2.core import examples
| {
"content_hash": "b24501d351b7b9b07493670eeb3a84e7",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 69,
"avg_line_length": 27.428571428571427,
"alnum_prop": 0.796875,
"repo_name": "lidatong/dataclasses-json",
"id": "86809fce8dc9515f03358c2b897bbe9d1e49604b",
"size": "192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/hypothesis2/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132753"
},
{
"name": "Shell",
"bytes": "209"
}
],
"symlink_target": ""
} |
import os
from smda.utility.PeFileLoader import PeFileLoader
from smda.utility.ElfFileLoader import ElfFileLoader
from smda.utility.MachoFileLoader import MachoFileLoader
from smda.utility.DelphiKbFileLoader import DelphiKbFileLoader
class FileLoader(object):
_file_path = None
_map_file = False
_data = b""
_raw_data = b""
_base_addr = 0
_bitness = 0
_code_areas = []
file_loaders = [PeFileLoader, ElfFileLoader, MachoFileLoader, DelphiKbFileLoader]
def __init__(self, file_path, load_file=True, map_file=False):
self._file_path = file_path
self._map_file = map_file
if load_file:
self._loadFile()
def _loadRawFileContent(self):
binary = ""
if os.path.isfile(self._file_path):
with open(self._file_path, "rb") as inf:
binary = inf.read()
return binary
def _loadFile(self, buffer=None):
self._raw_data = buffer if buffer is not None else self._loadRawFileContent()
if self._map_file:
for loader in self.file_loaders:
if loader.isCompatible(self._raw_data):
self._data = loader.mapBinary(self._raw_data)
self._base_addr = loader.getBaseAddress(self._raw_data)
self._bitness = loader.getBitness(self._raw_data)
self._code_areas = loader.getCodeAreas(self._raw_data)
break
else:
self._data = self._raw_data
def getData(self):
return self._data
def getRawData(self):
return self._raw_data
def getBaseAddress(self):
return self._base_addr
def getBitness(self):
return self._bitness
def getCodeAreas(self):
return self._code_areas
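# Minimal usage sketch (the path is a placeholder; assumes a PE/ELF/Mach-O or
# Delphi KB sample on disk):
#
#     loader = FileLoader('/tmp/sample.bin', map_file=True)
#     print(hex(loader.getBaseAddress()), loader.getBitness(), len(loader.getData()))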
| {
"content_hash": "ad98abcd747747fdad1d5ea871993954",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 85,
"avg_line_length": 31.42105263157895,
"alnum_prop": 0.6063651591289783,
"repo_name": "danielplohmann/smda",
"id": "45a0145b55f8fd489a0bae94d5f4e6354db24d4b",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smda/utility/FileLoader.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "464"
},
{
"name": "Python",
"bytes": "301605"
}
],
"symlink_target": ""
} |
import os
import argparse
keywords = {
    "mac": [
        "brew install",
        "brew cask install",
        "port install"
    ],
    "linux": [
        "apt-get install",
        "aptitude install",
        "yum install",
        "pacman install",
        "dpkg -i",
        "dnf install",
        "zypper in",
        "make install",
        "tar "
    ],
    "lua": [
        "luarocks install",
        "luarocks make"
    ],
    "python": [
        "pip install",
        "easy_install",
        "conda install"
    ],
    "ruby": [
        "gem install",
        "rvm install"
    ],
    "node": [
        "npm install",
        "bower install"
    ],
}
def whatinstalled():
parser = argparse.ArgumentParser(description='A simple tool to retrieve what you installed using CLI')
parser.add_argument('-f', '--file', dest='bash_file', type=str, help="custom file to parse", default="~/.bash_history")
parser.add_argument('-p', '--profile', dest='profile', type=str, help="specific profile to use", default=None)
args = parser.parse_args()
global keywords
history_file = os.path.expanduser(args.bash_file)
if args.profile is not None and args.profile in keywords:
    keywords = {args.profile: keywords[args.profile]}
elif args.profile is not None:
    print("\n[ERROR] Profile doesn't exist\n")
    exit(1)  # non-zero exit: this is an error, not a normal termination
with open(history_file, "r") as f:
    for line in f:
        for category in keywords:
            for item in keywords[category]:
                if item in line:
                    print("[" + category + "]" + str(line[:-1]))
if __name__ == '__main__':
whatinstalled()
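# Example invocations (file path and profile are illustrative):
#   python whatinstalled.py
#   python whatinstalled.py -p python
#   python whatinstalled.py -f ~/.zsh_history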
| {
"content_hash": "f5c36a59f714c60966c07d43c12a2be0",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 135,
"avg_line_length": 22.75,
"alnum_prop": 0.6188186813186813,
"repo_name": "AlexMili/WhatInstalled",
"id": "4b0b5911156215dc62e2a9ee8449bf5d4d203a5f",
"size": "1456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "whatinstalled.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1800"
}
],
"symlink_target": ""
} |
import pandas as pd
import csv
r = pd.read_csv('DropOutRatePrimary.csv')
dropOut = pd.DataFrame(r)
r2 = pd.read_csv('Countries.csv')
countries = pd.DataFrame(r2)
# keep only the continent and country-name columns, then rename them
countries = countries.drop(countries.columns[[2,3,4,5,6,7,8,9,10]], axis=1)
countries.columns = ['Continent','Reference Area']
# attach each country's continent to its drop-out observations
result = pd.merge(dropOut, countries, on='Reference Area')
result = result.sort_values(by=["Continent","Observation Value"], ascending=True)
result.to_csv('DropOutWithCont.csv')
##writer = csv.writer(open('output.csv', 'w'))
##for row in r:
## print(row)
## writer.writerow(row)
| {
"content_hash": "cfd6dea9384f7e94ae103c03e7ff9965",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 81,
"avg_line_length": 26.40909090909091,
"alnum_prop": 0.7039586919104991,
"repo_name": "caHaber/caHaber.github.io",
"id": "71f5958c699199fe5d6e978924b8223680e9f742",
"size": "581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UNDemo's/accessorAddCont.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109165"
},
{
"name": "HTML",
"bytes": "276281"
},
{
"name": "JavaScript",
"bytes": "409926"
},
{
"name": "Python",
"bytes": "3761"
},
{
"name": "Shell",
"bytes": "176"
}
],
"symlink_target": ""
} |
"""
WSGI config for WY CTF Website project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
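# A hypothetical Apache pairing for the daemon-mode note above (the paths are
# placeholders, not part of this project):
#
#   WSGIDaemonProcess wy_ctf python-path=/srv/wy_ctf_website
#   WSGIProcessGroup wy_ctf
#   WSGIScriptAlias / /srv/wy_ctf_website/config/wsgi.py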
| {
"content_hash": "1e98b1d8d7a34be4e24499ec6826fcd1",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 42.6764705882353,
"alnum_prop": 0.7932460372157133,
"repo_name": "pattyjogal/wy_ctf_website",
"id": "d708e97db641d8e81730dedb8c71053d59755419",
"size": "1451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4582"
},
{
"name": "HTML",
"bytes": "37951"
},
{
"name": "JavaScript",
"bytes": "76000"
},
{
"name": "Python",
"bytes": "76218"
},
{
"name": "Shell",
"bytes": "4232"
}
],
"symlink_target": ""
} |
from django import db
from django.conf import settings
from django.core.management.base import NoArgsCommand
from data.models import AnnualStateEnergyExpenditures
import csv
# National Priorities Project Data Repository
# import_energy_consumption_state_annual.py
# Updated 1/14/2010, Joshua Ruihley, Sunlight Foundation
# Imports U.S. Department of Energy Annual State Energy Expenditure data
# source file: http://www.eia.doe.gov/emeu/states/sep_prices/total/csv/ex_all.csv (accurate as of 11/18/2009)
# destination model: StateAnnualEnergyExpenditures
# HOWTO:
# 1) Download source file from url listed above
# 2) change SOURCE_FILE variable to the the path of the source file you just downloaded
# 3) Run as Django management command from your project path "python manage.py import_energy_expenditures_state_annual"
SOURCE_FILE = '%s/doe.gov/ex_all.csv' % settings.LOCAL_DATA_ROOT
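# The parser below expects the CSV in wide form: a State column, an MSN code
# column, then one column per year, e.g. (illustrative values):
#   State,MSN,1970,1971,...
#   AK,TETCB,123.4,130.2,...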
class Command(NoArgsCommand):
def handle_noargs(self, **options):
data_reader = csv.reader(open(SOURCE_FILE))
i=0
for row in data_reader:
if (i==0):
fields = row
else:
j=0
row_dict = {}
for column in fields:
if(column == 'State' or column == 'MSN'):
row_dict[column] = row[j]
else:
row_dict['year'] = int(column)
if row[j] == '':
row[j] = None
else:
row[j] = float(row[j])
row_dict['value'] = row[j]
db_row = AnnualStateEnergyExpenditures(state=row_dict['State'],
msn=row_dict['MSN'], year=row_dict['year'], value=row_dict['value'])
db_row.save()
db.reset_queries()
j = j + 1
i = i + 1 | {
"content_hash": "094c177d51cf2e7ca0ac9095916eeded",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 119,
"avg_line_length": 40.916666666666664,
"alnum_prop": 0.5570264765784114,
"repo_name": "npp/npp-api",
"id": "f0fddd4d50db06711c8ef8bcd5a176b40794d936",
"size": "1964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/management/commands/import_energy_expenditures_state_annual.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5982539"
},
{
"name": "Shell",
"bytes": "36"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('manutencao', '0006_0006_fo2_man_user_tipo_maq'),
]
operations = [
migrations.AddField(
model_name='maquina',
name='data_inicio',
field=models.DateField(default=datetime.date(2019, 7, 12), verbose_name='Data de início da rotina'),
preserve_default=False,
),
]
| {
"content_hash": "51436e3e1ae63afdbf8400a6630cbc9a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 112,
"avg_line_length": 25.35,
"alnum_prop": 0.6193293885601578,
"repo_name": "anselmobd/fo2",
"id": "b13c4452269f4cf09b5334fe3bcbe65024ab07a8",
"size": "581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/manutencao/migrations/0007_maquina_data_inicio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
} |
from .service_principal_base import ServicePrincipalBase
class ServicePrincipalCreateParameters(ServicePrincipalBase):
"""Request parameters for creating a new service principal.
All required parameters must be populated in order to send to Azure.
:param account_enabled: whether or not the service principal account is
enabled
:type account_enabled: bool
:param app_role_assignment_required: Specifies whether an
AppRoleAssignment to a user or group is required before Azure AD will
issue a user or access token to the application.
:type app_role_assignment_required: bool
:param key_credentials: The collection of key credentials associated with
the service principal.
:type key_credentials: list[~azure.graphrbac.models.KeyCredential]
:param password_credentials: The collection of password credentials
associated with the service principal.
:type password_credentials:
list[~azure.graphrbac.models.PasswordCredential]
:param service_principal_type: the type of the service principal
:type service_principal_type: str
:param tags: Optional list of tags that you can apply to your service
principals. Not nullable.
:type tags: list[str]
:param app_id: Required. The application ID.
:type app_id: str
"""
_validation = {
'app_id': {'required': True},
}
_attribute_map = {
'account_enabled': {'key': 'accountEnabled', 'type': 'bool'},
'app_role_assignment_required': {'key': 'appRoleAssignmentRequired', 'type': 'bool'},
'key_credentials': {'key': 'keyCredentials', 'type': '[KeyCredential]'},
'password_credentials': {'key': 'passwordCredentials', 'type': '[PasswordCredential]'},
'service_principal_type': {'key': 'servicePrincipalType', 'type': 'str'},
'tags': {'key': 'tags', 'type': '[str]'},
'app_id': {'key': 'appId', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ServicePrincipalCreateParameters, self).__init__(**kwargs)
self.app_id = kwargs.get('app_id', None)
| {
"content_hash": "984270005a7800f39c2a4dd212ed9c99",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 95,
"avg_line_length": 43.520833333333336,
"alnum_prop": 0.6792723791287697,
"repo_name": "Azure/azure-sdk-for-python",
"id": "ec6daf3704143337c54d5691ef15f05d5d5d8d6b",
"size": "2563",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/service_principal_create_parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from pyspark.tests import PySparkTestCase
from cleaning.run import run
from pyspark.ml import clustering
class TestRun(PySparkTestCase):
def test_run(self):
print(clustering.__all__)
self.fail()
def test_parse_algorithm_variables(self):
self.fail() | {
"content_hash": "53503392047feb3cf0c606f2575fec98",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 64,
"avg_line_length": 20.6,
"alnum_prop": 0.7119741100323624,
"repo_name": "mssalvador/WorkflowCleaning",
"id": "b5f0b870bff63e4d763f5cabc58fc90fab534961",
"size": "309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/cleaning/test_run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1053440"
},
{
"name": "Makefile",
"bytes": "396"
},
{
"name": "Python",
"bytes": "179722"
}
],
"symlink_target": ""
} |
import os
import ConfigParser
import Leap
from Leap import CircleGesture, SwipeGesture
class GestureListener(Leap.Listener):
def __init__(self):
super(GestureListener, self).__init__()
self.config = ConfigParser.ConfigParser()
self.gestures_stopped = {
"circleleft": 0,
"circleright": 0,
"keytap": 0,
"screentap": 0,
"swipeleft": 0,
"swiperight": 0,
"swipeup": 0,
"swipedown": 0
}
self.gestures_started = {
"circleleft": 0,
"circleright": 0,
"keytap": 0,
"screentap": 0,
"swipeleft": 0,
"swiperight": 0,
"swipeup": 0,
"swipedown": 0
}
self.gestures = [
"circleleft",
"circleright",
"keytap",
"screentap",
"swipeleft",
"swiperight",
"swipeup",
"swipedown"
]
def on_init(self, controller):
self.read_config()
print "Initialized"
def read_config(self):
self.config.read("./gesture_commands.cfg")
print("Configuration loaded")
for section in self.config.sections():
print("\n" + section + ":\n")
for option in self.config.options(section):
value = self.config.get(section, option)
print(" " + option + ": " + value)
def on_connect(self, controller):
controller.enable_gesture(Leap.Gesture.TYPE_CIRCLE)
# controller.enable_gesture(Leap.Gesture.TYPE_KEY_TAP)
controller.enable_gesture(Leap.Gesture.TYPE_SCREEN_TAP)
controller.enable_gesture(Leap.Gesture.TYPE_SWIPE)
print "Connected"
def on_disconnect(self, controller):
print "Disconnected"
def on_exit(self, controller):
print "Exited"
def on_frame(self, controller):
frame = controller.frame()
for gesture in frame.gestures():
number_of_pointables = len(gesture.pointables)
if number_of_pointables != 0 and gesture.is_valid and (gesture.state == Leap.Gesture.STATE_START or gesture.state == Leap.Gesture.STATE_STOP):
gestures = None
if gesture.state == Leap.Gesture.STATE_START:
gestures = self.gestures_started
elif gesture.state == Leap.Gesture.STATE_STOP:
gestures = self.gestures_stopped
if gesture.type == Leap.Gesture.TYPE_CIRCLE:
circle = CircleGesture(gesture)
if circle.pointable.direction.angle_to(circle.normal) > Leap.PI / 4:
gestures["circleleft"] += number_of_pointables
else:
gestures["circleright"] += number_of_pointables
elif gesture.type == Leap.Gesture.TYPE_KEY_TAP:
    # key taps are discrete gestures (they only report STATE_STOP), so the
    # started counter is bumped here too to satisfy the started == stopped check
    self.gestures_started["keytap"] += number_of_pointables
    gestures["keytap"] += number_of_pointables
elif gesture.type == Leap.Gesture.TYPE_SCREEN_TAP:
gestures["screentap"] += number_of_pointables
elif gesture.type == Leap.Gesture.TYPE_SWIPE:
swipe = SwipeGesture(gesture)
left = 0
right = 0
up = 0
down = 0
horizontal_movement = swipe.direction[0]
vertical_movement = swipe.direction[1]
movement = None
if horizontal_movement != abs(horizontal_movement):
left = abs(horizontal_movement)
elif horizontal_movement != 0:
right = horizontal_movement
if vertical_movement != abs(vertical_movement):
down = abs(vertical_movement)
elif vertical_movement != 0:
up = vertical_movement
if left > right and left > up and left > down:
    movement = "left"
elif right > left and right > up and right > down:
    movement = "right"
elif up > down and up > left and up > right:
    movement = "up"
elif down > up and down > left and down > right:
    movement = "down"
if movement is not None:  # tied directions leave movement unset; skip them
    gestures["swipe" + movement] += 1
for gesture_name in self.gestures:
if self.gestures_started.get(gesture_name) != 0 and self.gestures_started.get(gesture_name) == self.gestures_stopped.get(gesture_name):
self.execute(gesture_name, str(self.gestures_started.get(gesture_name)))
self.gestures_started[gesture_name] = 0
self.gestures_stopped[gesture_name] = 0
elif self.gestures_started.get(gesture_name) > 5 or self.gestures_stopped.get(gesture_name) >= 5:
self.gestures_started[gesture_name] = 0
self.gestures_stopped[gesture_name] = 0
def execute(self, command_name, number_of_pointables):
command = "Not defined in config!"
if self.config.has_option(command_name, number_of_pointables):
command = self.config.get(command_name, number_of_pointables)
os.system(command)
print("")
print("Gesture: " + command_name)
print("Pointables: " + number_of_pointables)
print("Command: " + command) | {
"content_hash": "99febf05f803576711e666b47759ec69",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 154,
"avg_line_length": 33.05882352941177,
"alnum_prop": 0.5245551601423487,
"repo_name": "dylian94/LeapGestureControl",
"id": "205287fcddaeb22722edbeb3e2e1724e47de8373",
"size": "5620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GestureListener.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6025"
}
],
"symlink_target": ""
} |
"""
test_django-excel-to-model
------------
Tests for `django-excel-to-model` models module.
"""
from django.test import TestCase
# from unittest import TestCase
from django_excel_to_model.openpyxl_reader import OpenpyxlExcelFile
from sap_asset_master_data20191224.models import mapping
class TestOpenpyxl(TestCase):
def setUp(self):
pass
def test_something(self):
x = OpenpyxlExcelFile(r"C:\N-PC0WN7R6-Data\q19420\Downloads\sapItems20191223-1.XLSx")
s = x.get_sheet(0)
s.set_header_row(0)
for i in s.enumerate_mapped(mapping, 2):
print(i)
def tearDown(self):
pass
| {
"content_hash": "6f0dcaceb433b54d79cb9069a2eda2dc",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 93,
"avg_line_length": 23.035714285714285,
"alnum_prop": 0.6697674418604651,
"repo_name": "weijia/django-excel-to-model",
"id": "b078190b27e28c76e2f6312fda29c3c87dbe3e12",
"size": "692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_excel_to_model/tests/test_openpyxl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1774"
},
{
"name": "Makefile",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "49812"
}
],
"symlink_target": ""
} |
import pymongo
from abstractdb import AbstractDB
from spearmint.utils.compression import compress_nested_container, decompress_nested_container
class MongoDB(AbstractDB):
def __init__(self, database_address='localhost', database_name='spearmint'):
try:
self.client = pymongo.MongoClient(database_address)
self.db = self.client[database_name]
# Get the ID of this connection for locking.
self.myId = self.db.last_status()['connectionId']
except:
raise Exception('Could not establish a connection to MongoDB.')
def save(self, save_doc, experiment_name, experiment_field, field_filters=None):
"""
Saves a document into the database.
Compresses any numpy arrays so that they can be saved to MongoDB.
field_filters must return at most one document, otherwise it is not clear
which one to update and an exception will be raised.
"""
if field_filters is None:
field_filters = {}
save_doc = compress_nested_container(save_doc)
dbcollection = self.db[experiment_name][experiment_field]
dbdocs = list(dbcollection.find(field_filters))
upsert = False
if len(dbdocs) > 1:
raise Exception('Ambiguous save attempted. Field filters returned more than one document.')
elif len(dbdocs) == 1:
dbdoc = dbdocs[0]
else:
#sys.stderr.write('Document not found, inserting new document.\n')
upsert = True
#TODO: change this to find_and_modify
result = dbcollection.update(field_filters, save_doc, upsert=upsert)
if upsert:
return result['upserted']
else:
return result['updatedExisting']
def load(self, experiment_name, experiment_field, field_filters=None):
# Return a list of documents from the database, decompressing any numpy arrays
if field_filters is None:
field_filters = {}
dbcollection = self.db[experiment_name][experiment_field]
dbdocs = list(dbcollection.find(field_filters))
if len(dbdocs) == 0:
return None
elif len(dbdocs) == 1:
return decompress_nested_container(dbdocs[0])
else:
return [decompress_nested_container(dbdoc) for dbdoc in dbdocs]
def remove(self, experiment_name, experiment_field, field_filters={}):
self.db[experiment_name][experiment_field].remove(field_filters)
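# Minimal usage sketch (assumes a local mongod on the default port; the
# experiment and collection names are placeholders):
#
#     db = MongoDB()
#     db.save({'id': 1, 'status': 'new'}, 'demo-experiment', 'jobs', {'id': 1})
#     print(db.load('demo-experiment', 'jobs', {'id': 1}))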
| {
"content_hash": "d523932c3aa181201c2b2fec202e96cc",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 103,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.6318807339449541,
"repo_name": "fmaguire/BayeHem",
"id": "28ab540774a13bb4f41fa1a3eda3e50d327def0b",
"size": "12246",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Spearmint/spearmint/utils/database/mongodb.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "10914"
},
{
"name": "Batchfile",
"bytes": "23271"
},
{
"name": "C",
"bytes": "3837551"
},
{
"name": "C++",
"bytes": "33218177"
},
{
"name": "CSS",
"bytes": "1556"
},
{
"name": "Groff",
"bytes": "59793"
},
{
"name": "HTML",
"bytes": "365248"
},
{
"name": "IDL",
"bytes": "14"
},
{
"name": "Java",
"bytes": "13433"
},
{
"name": "Lua",
"bytes": "23713"
},
{
"name": "M4",
"bytes": "19951"
},
{
"name": "Makefile",
"bytes": "118962"
},
{
"name": "Objective-C",
"bytes": "8790"
},
{
"name": "Perl",
"bytes": "272990"
},
{
"name": "Python",
"bytes": "6200881"
},
{
"name": "QML",
"bytes": "593"
},
{
"name": "R",
"bytes": "4898"
},
{
"name": "Shell",
"bytes": "270614"
},
{
"name": "TeX",
"bytes": "8434"
},
{
"name": "XSLT",
"bytes": "759"
},
{
"name": "Yacc",
"bytes": "18910"
}
],
"symlink_target": ""
} |
import os
from multiprocessing import cpu_count
from typing import List
import numpy as np
import tensorflow as tf
from sklearn import datasets
from sklearn.model_selection import train_test_split
from ._base import ImageDataset
def make_halfmoons(n_samples_per_factors=200,
image_size=64,
marker_size=12.,
seed=1,
n_cpu=4):
from matplotlib import pyplot as plt
from odin.utils import MPI
from tqdm import tqdm
rand = np.random.RandomState(seed=seed)
shapes = ['o', 's', '^', 'p']
shapes_to_idx = {v: k for k, v in enumerate(shapes)}
colors = np.linspace(0.0, 1.0, num=10)
n_factors = len(shapes) * len(colors)
n_samples = n_samples_per_factors * n_factors
shapes = np.tile(shapes, [int(n_samples / len(shapes))])
colors = np.tile(colors, [int(n_samples / len(colors))])
rand.shuffle(shapes)
rand.shuffle(colors)
# === 1. Generate data
x, y = datasets.make_moons(n_samples=n_samples,
shuffle=True,
noise=.05,
random_state=rand.randint(1e8))
x_min = np.min(x, 0, keepdims=True)
x_max = np.max(x, 0, keepdims=True)
x = (x - x_min) / (x_max - x_min) * 2. - 1.
# === 2. Helper
dpi = 200
cmap = plt.get_cmap('coolwarm')
def create_image(ids: List[int]):
all_x = []
all_y = []
for i in ids:
fig = plt.figure(figsize=(image_size / dpi, image_size / dpi),
dpi=dpi,
facecolor="black",
frameon=True)
ax = plt.gca()
ax.set_facecolor('black')
ax.scatter(x[i, 0], x[i, 1],
s=marker_size,
marker=shapes[i],
color=cmap(colors[i]),
antialiased=True,
edgecolors='none')
ax.set_xlim([-1.2, 1.2])
ax.set_ylim([-1.2, 1.2])
ax.axis('off')
ax.margins(0)
fig.tight_layout(pad=0)
# convert to array
fig.canvas.draw()
img = np.frombuffer(fig.canvas.tostring_rgb(), np.uint8)
img = np.reshape(img, (image_size, image_size, 3))
# img = np.asarray(fig.canvas.buffer_rgba())[:, :, :3]
plt.close(fig)
# save data
all_x.append(np.expand_dims(img, 0))
all_y.append(
[x[i, 0], x[i, 1], y[i], colors[i] * 2. - 1., shapes_to_idx[shapes[i]]])
return np.concatenate(all_x, 0), np.vstack(all_y)
# === 2. Generate images
jobs = list(range(n_samples))
progress = tqdm(total=n_samples, unit='images')
X = []
Y = []
for img, lab in MPI(jobs, create_image, ncpu=n_cpu, batch=100):
progress.update(img.shape[0])
X.append(img)
Y.append(lab)
progress.clear()
progress.close()
return np.concatenate(X, 0), np.concatenate(Y, 0)
class HalfMoons(ImageDataset):
"""Half Moons data but instead of position as features, we save the images
of each data points include some factor of variations:
The factors are:
- x position [-1, 1]
- y position [-1, 1]
- labels [0, 1]
- colors [-1, 1] cmap 'coolwarm' (10 linearly spaced values)
- shapes [0 'circle', 1 'square', 2 'triangle', 3 'pentagon']
With the default settings there are 8000 images,
i.e. roughly 200 images for each color-shape combination
The tensorflow datasets don't include the labels as factors
Raw numpy arrays (included the labels) could be accessed via attributes:
- x_train, y_train
- x_valid, y_valid
- x_test, y_test
"""
def __init__(self,
path: str = '~/.halfmoons',
n_cpu: int = -1,
seed: int = 1):
super(HalfMoons, self).__init__()
self.image_size = 64
if n_cpu <= 0:
n_cpu = cpu_count() - 1
path = os.path.abspath(os.path.expanduser(path))
if '.npz' not in path.lower():
path = f'{path}.npz'
if not os.path.exists(path):
X, Y = make_halfmoons(image_size=self.image_size, n_cpu=n_cpu, seed=seed)
np.savez_compressed(path, X=X, Y=Y)
else:
data = np.load(path)
X = data['X']
Y = data['Y']
Y = Y.astype(np.float32)
x_train, x_test, y_train, y_test = train_test_split(X, Y, train_size=0.8,
random_state=seed,
stratify=Y[:, 2])
x_train, x_valid, y_train, y_valid = train_test_split(
x_train, y_train, train_size=0.9,
random_state=seed,
stratify=y_train[:, 2])
# numpy datasets
self.x_train = x_train
self.x_valid = x_valid
self.x_test = x_test
self.y_train = y_train
self.y_valid = y_valid
self.y_test = y_test
# remove the labels
y_train = y_train[:, [0, 1, 3, 4]]
y_valid = y_valid[:, [0, 1, 3, 4]]
y_test = y_test[:, [0, 1, 3, 4]]
# tensorflow datasets
self.train = tf.data.Dataset.zip((
tf.data.Dataset.from_tensor_slices(x_train),
tf.data.Dataset.from_tensor_slices(y_train)))
self.valid = tf.data.Dataset.zip((
tf.data.Dataset.from_tensor_slices(x_valid),
tf.data.Dataset.from_tensor_slices(y_valid)))
self.test = tf.data.Dataset.zip((
tf.data.Dataset.from_tensor_slices(x_test),
tf.data.Dataset.from_tensor_slices(y_test)))
@property
def binarized(self) -> bool:
return False
@property
def shape(self) -> List[int]:
return [self.image_size, self.image_size, 3]
@property
def labels(self) -> List[str]:
return ['x', 'y', 'color', 'shape']
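# Minimal usage sketch (first construction renders and caches all images,
# which can take a while; the path and cpu count are illustrative):
#
#     ds = HalfMoons(path='~/.halfmoons', n_cpu=2)
#     print(ds.shape, ds.labels)
#     train = ds.train.shuffle(1024).batch(32)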
| {
"content_hash": "62cfca5b21e242840482654c4f8991bd",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 80,
"avg_line_length": 31.445714285714285,
"alnum_prop": 0.565328002907505,
"repo_name": "imito/odin",
"id": "80d15a1b246a82bafd95e0f851bac60aea5a8cca",
"size": "5503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "odin/fuel/image_data/toys.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1516670"
}
],
"symlink_target": ""
} |
__author__ = 'buec'
import modgrammar
import sys
from pyspeechgrammar import model
class JavaIdentifier(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.WORD("A-Za-z$", "A-Za-z0-9_$"))
def grammar_elem_init(self, session_data):
self.value = self[0].string
class Package(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.LIST_OF(JavaIdentifier, sep=".", min=1))
def grammar_elem_init(self, session_data):
self.value = self[0].string
class SelfIdentifyingHeader(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.LITERAL("#JSGF"), modgrammar.WHITESPACE,
modgrammar.WORD("A-Za-z0-9._\-"),
modgrammar.OPTIONAL(modgrammar.WHITESPACE, modgrammar.WORD("A-Za-z0-9._\-")),
modgrammar.OPTIONAL(modgrammar.WHITESPACE, modgrammar.WORD("A-Za-z0-9._\-")),
modgrammar.LITERAL(";"), modgrammar.LITERAL('\n'))
def grammar_elem_init(self, session_data):
self.version = self[2].string
if self[3] is not None:
self.encoding = self[3][1].string
else:
self.encoding = None
if self[4] is not None:
self.locale = self[4][1].string
else:
self.locale = None
class NameDeclaration(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.LITERAL("grammar"), modgrammar.WHITESPACE,
modgrammar.OPTIONAL(Package, modgrammar.LITERAL('.')),
JavaIdentifier,
modgrammar.LITERAL(";"))
def grammar_elem_init(self, session_data):
self.name = self[3].value
if self[2] is None:
self.package = None
else:
self.package = self[2][0].value
class ImportStatement(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (
modgrammar.LITERAL("import"), modgrammar.WHITESPACE, modgrammar.LITERAL("<"),
Package,
modgrammar.LITERAL('.'),
JavaIdentifier | modgrammar.LITERAL('*'),
modgrammar.LITERAL(">"),
modgrammar.LITERAL(";"), modgrammar.OPTIONAL(modgrammar.WHITESPACE))
def grammar_elem_init(self, session_data):
self.package = self[3].value
self.rule = self[5].string
class Header(modgrammar.Grammar):
grammar_whitespace_mode = 'optional'
grammar = (SelfIdentifyingHeader, NameDeclaration, modgrammar.ZERO_OR_MORE(ImportStatement))
def grammar_elem_init(self, session_data):
self.version = self[0].version
self.encoding = self[0].encoding
self.locale = self[0].locale
self.name = self[1].name
self.package = self[1].package
self.imports = []
for i in range(len(self[2].elements)):
self.imports.append(self[2][i])
class PublicModifier(modgrammar.Grammar):
grammar_whitespace_mode = 'optional'
grammar = (modgrammar.LITERAL("public"))
class MetaCharacter(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.OR(modgrammar.L(';'), modgrammar.L('='), modgrammar.L('|'), modgrammar.L('*'),
modgrammar.L('+'), modgrammar.L('<'), modgrammar.L('>'), modgrammar.L('('),
modgrammar.L(')'), modgrammar.L('['), modgrammar.L(']'), modgrammar.L('{'),
modgrammar.L('}'), modgrammar.L('/*'), modgrammar.L('*/'), modgrammar.L('//'),
modgrammar.L(" "), modgrammar.L('"')))
class Tag(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.L('{'), modgrammar.ZERO_OR_MORE(modgrammar.ANY_EXCEPT('^}')), modgrammar.L('}'))
def grammar_elem_init(self, session_data):
self.model = model.Tag(name=self[1].string)
class UnaryOperator(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.OPTIONAL(modgrammar.WHITESPACE),
modgrammar.L('*') |
modgrammar.L('+') |
modgrammar.LIST_OF(Tag, sep=" ", min=1))
def grammar_elem_init(self, session_data):
self.is_kleene_star = False
self.is_plus = False
self.tags = []
if self[1].string == '*':
self.is_kleene_star = True
elif self[1].string == '+':
self.is_plus = True
else:
for i in range(0, len(self[1].elements), 2):
self.tags.append(self[1][i].model)
class Token(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.ONE_OR_MORE(modgrammar.EXCEPT(modgrammar.ANY(""), MetaCharacter)))
def grammar_elem_init(self, session_data):
self.model = model.Token(value=self[0].string.strip())
class QuotedToken(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.LITERAL('"'), modgrammar.OPTIONAL(Token), modgrammar.ONE_OR_MORE(modgrammar.WHITESPACE, Token),
modgrammar.LITERAL('"'))
def grammar_elem_init(self, session_data):
if self[1] is not None:
value = self[1].string + self[2].string
else:
value = self[2].string
self.model = model.Token(value=value.strip())
class RuleReference(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.L('<'),
modgrammar.OPTIONAL(Package, modgrammar.L('.')),
JavaIdentifier,
modgrammar.L('>'))
def grammar_elem_init(self, session_data):
if self[1] is not None:
value = self[1].string + self[2].string
else:
value = self[2].string
self.model = model.RuleReference(value)
class Group(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.L('('), modgrammar.OPTIONAL(modgrammar.WHITESPACE),
modgrammar.REF("RuleExpansion", module=sys.modules[__name__]),
modgrammar.OPTIONAL(modgrammar.WHITESPACE), modgrammar.L(')'))
def grammar_elem_init(self, session_data):
self.model = model.Group(self[2].model)
class OptionalGroup(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.L('['), modgrammar.OPTIONAL(modgrammar.WHITESPACE),
modgrammar.REF("RuleExpansion", module=sys.modules[__name__]),
modgrammar.OPTIONAL(modgrammar.WHITESPACE), modgrammar.L(']'))
def grammar_elem_init(self, session_data):
self.model = model.OptionalGroup(self[2].model)
class SequenceRuleExpansion(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (Token |
QuotedToken |
RuleReference |
Group |
OptionalGroup,
modgrammar.OPTIONAL(UnaryOperator))
def grammar_elem_init(self, session_data):
self.model = self[0].model
if self[1] is not None:
min_repeat = 1
max_repeat = 1
if self[1].is_kleene_star:
min_repeat = 0
max_repeat = model.Element.INFINITY_REPEAT
elif self[1].is_plus:
min_repeat = 1
max_repeat = model.Element.INFINITY_REPEAT
self.model.set_repeat(min_repeat, max_repeat)
for tag in self[1].tags:
self.model.add_tag(tag)
class Sequence(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (SequenceRuleExpansion,
modgrammar.WHITESPACE,
modgrammar.LIST_OF(SequenceRuleExpansion, sep=modgrammar.WHITESPACE, min=1))
def grammar_elem_init(self, session_data):
self.model = model.Sequence()
self.model.add_element(self[0].model)
for i in range(0, len(self[2].elements), 2):
self.model.add_element(self[2][i].model)
class AlternativeWeight(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.L("/"), modgrammar.WORD("0-9.", "0-9.ef"), modgrammar.LITERAL("/"), modgrammar.WHITESPACE)
def grammar_elem_init(self, session_data):
self.value = self[1].string
class AlternativeSeparator(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.OPTIONAL(modgrammar.WHITESPACE),
modgrammar.LITERAL("|"),
modgrammar.OPTIONAL(modgrammar.WHITESPACE))
class AlternativeRuleExpansion(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (Token |
QuotedToken |
RuleReference |
Sequence |
Group |
OptionalGroup,
modgrammar.OPTIONAL(UnaryOperator))
def grammar_elem_init(self, session_data):
self.model = self[0].model
if self[1] is not None:
min_repeat = 1
max_repeat = 1
if self[1].is_kleene_star:
min_repeat = 0
max_repeat = model.Element.INFINITY_REPEAT
elif self[1].is_plus:
min_repeat = 1
max_repeat = model.Element.INFINITY_REPEAT
self.model.set_repeat(min_repeat, max_repeat)
for tag in self[1].tags:
self.model.add_tag(tag)
class Alternatives(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (modgrammar.OPTIONAL(AlternativeWeight),
AlternativeRuleExpansion,
modgrammar.ONE_OR_MORE(AlternativeSeparator,
modgrammar.OPTIONAL(AlternativeWeight),
AlternativeRuleExpansion
))
def grammar_elem_init(self, session_data):
self.model = model.Alternatives()
element = self[1].model
if self[0] is not None:
element.weight = self[0].value
self.model.add_element(element)
for i in range(0, len(self[2].elements)):
element = self[2][i][2].model
if self[2][i][1] is not None:
element.weight = self[2][i][1].value
self.model.add_element(element)
class RuleExpansion(modgrammar.Grammar):
grammar_whitespace_mode = 'explicit'
grammar = (Token | QuotedToken | RuleReference | Sequence | Alternatives | Group | OptionalGroup,
modgrammar.OPTIONAL(UnaryOperator))
def grammar_elem_init(self, session_data):
self.model = self[0].model
if self[1] is not None:
min_repeat = 1
max_repeat = 1
if self[1].is_kleene_star:
min_repeat = 0
max_repeat = model.Element.INFINITY_REPEAT
elif self[1].is_plus:
min_repeat = 1
max_repeat = model.Element.INFINITY_REPEAT
self.model.set_repeat(min_repeat, max_repeat)
for tag in self[1].tags:
self.model.add_tag(tag)
class Rule(modgrammar.Grammar):
grammar_whitespace_mode = 'optional'
grammar = (modgrammar.OPTIONAL(PublicModifier),
modgrammar.LITERAL("<"),
JavaIdentifier,
modgrammar.LITERAL(">"),
modgrammar.LITERAL("="),
RuleExpansion,
modgrammar.LITERAL(";"))
def grammar_elem_init(self, session_data):
scope = model.Rule.SCOPE_PRIVATE
if self[0] is not None:
scope = model.Rule.SCOPE_PUBLIC
self.model = model.Rule(name=self[2].value, value=self[5].model, scope=scope)
class Grammar(modgrammar.Grammar):
grammar_whitespace_mode = 'optional'
grammar = (Header, modgrammar.ZERO_OR_MORE(Rule), modgrammar.OPTIONAL(modgrammar.WHITESPACE), modgrammar.EOI)
def grammar_elem_init(self, session_data):
self.model = model.Grammar(name=self[0].name, language=self[0].locale, encoding=self[0].encoding)
for i in range(len(self[1].elements)):
rule = self[1][i].model
self.model.add_rule(rule)
if rule.scope == model.Rule.SCOPE_PUBLIC and self.model.root_rule is None:
self.model.root_rule = rule
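# Minimal smoke test (the grammar text is hypothetical; modgrammar exposes
# Grammar.parser() / parse_text()):
#
#     parser = Grammar.parser()
#     result = parser.parse_text(
#         '#JSGF V1.0;\ngrammar demo;\npublic <hi> = hello;\n', eof=True)
#     print(result.model.name, result.model.root_rule.name)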
| {
"content_hash": "1c5152ee66b9b0c473561295f69f44cc",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 121,
"avg_line_length": 33.3133514986376,
"alnum_prop": 0.5956976934402094,
"repo_name": "ynop/pyspeechgrammar",
"id": "7ec168dc04ecd5200f81bb19120bdf3b88910147",
"size": "12226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyspeechgrammar/jsgf/grammars.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58213"
}
],
"symlink_target": ""
} |
"""
========================================
Cell Tracking (:mod:`tracking.core`)
========================================
.. currentmodule:: tracking.core
TITAN cell tracking
===================
.. autosummary::
:toctree: generated/
Cell_tracks
"""
#from .cell_tracking import Cell_tracks
from .tracks import Cell_tracks
from .visualization import animate
__all__ = [s for s in dir() if not s.startswith('_')]
| {
"content_hash": "d7369c29d95afac6765cc3c50a434d51",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 23.27777777777778,
"alnum_prop": 0.5322195704057279,
"repo_name": "mhpicel/tracking",
"id": "90eb0aa773644deec03de82ec4a099bae81c6e7f",
"size": "466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracking/core/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "42376"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template
from datetime import datetime, timedelta
import sqlite3
import json
import random
app = Flask(__name__)
def randomRGB():
r, g, b = [random.randint(0,255) for i in range(3)]
return r, g, b, 1
@app.route('/')
def index():
conn = sqlite3.connect("ruuvitag.db")
conn.row_factory = sqlite3.Row
# set how many days you want to see in charts
N = 30  # show charts for 30 days
date_N_days_ago = str(datetime.now() - timedelta(days=N))
tags = conn.execute("SELECT DISTINCT mac, name FROM sensors WHERE timestamp > ? ORDER BY name, timestamp DESC", (date_N_days_ago,))
sensors = ['temperature', 'humidity', 'pressure']
sList = {}
datasets = {}
for sensor in sensors:
datasets[sensor] = []
for tag in tags:
if tag['name']:
sList['timestamp'] = []
for sensor in sensors:
sList[sensor] = []
sData = conn.execute("SELECT timestamp, temperature, humidity, pressure FROM sensors WHERE mac = ? AND timestamp > ? ORDER BY timestamp", (tag['mac'], date_N_days_ago))
for sRow in sData:
sList['timestamp'].append(str(sRow['timestamp'])[:-3]) # remove seconds from timestamp
for sensor in sensors:
sList[sensor].append(sRow[sensor])
color = randomRGB()
dataset = """{{
label: '{}',
borderColor: 'rgba{}',
fill: false,
lineTension: 0.2,
data: {}
}}"""
for sensor in sensors:
datasets[sensor].append(dataset.format(tag['name'], color, sList[sensor]))
conn.close()
return render_template('ruuvitag.html', time = sList['timestamp'], temperature = datasets['temperature'], humidity = datasets['humidity'], pressure = datasets['pressure'])
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=int('80'))
| {
"content_hash": "049b69c95d033b18c7776522fbdce5f3",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 175,
"avg_line_length": 29.15,
"alnum_prop": 0.6512292738707833,
"repo_name": "dimamedia/RuuviTag-logger",
"id": "261377b5b0ce53f47483caa30edb4ff38103ed25",
"size": "1769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ruuvitag-web.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1981"
},
{
"name": "Python",
"bytes": "5058"
}
],
"symlink_target": ""
} |
import sys, os, unittest, manageconfigs as mc
from optparse import OptionParser
##########################################
def eval1(x,y,z): return x*y+z
def eval2(x,y,z): return x+y+z
@mc.manageConfigs
def sampleFunction(x, y, z=5, func=eval1, cat='test.cat'):
return cat, func(x,y,z)
@mc.manageConfigs
def sampleFunction2(x=5, y=2.45, z=4, func=eval1, cat='test.cat', verbose=True):
return cat, func(x,y,z)
##############################################
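# The tests below exercise the extras added by the @mc.manageConfigs
# decorator: a `config` keyword that accepts Configuration objects, dicts or
# file paths, a `dumpConfig` keyword, and `.defaults`/`.loadConfig` helpers
# on the wrapped function.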
class TestManageConfigsFromPython(unittest.TestCase):
def testProgramaticCall(self):
cat, num = sampleFunction(1, 2)
self.assertEquals(cat, 'test.cat')
self.assertEquals(num, 7)
cat, num = sampleFunction(5, 6, 7, cat='test1.cat')
self.assertEquals(cat, 'test1.cat')
self.assertEquals(num, 37)
self.assertRaises(TypeError, sampleFunction)
def testDumpConfig(self):
results, config = sampleFunction(5, 3, 7, cat='smarts', dumpConfig=True)
cat, num = results
self.assertEquals(cat, 'smarts')
self.assertEquals(num, 22)
self.assertEquals(config.x, 5)
self.assertEquals(config.y, 3)
self.assertEquals(config.z, 7)
self.assertEquals(config.cat, 'smarts')
def testExtractKeywords(self):
defaults = sampleFunction.defaults
self.assertEquals(defaults.x, None)
self.assertEquals(defaults.y, None)
self.assertEquals(defaults.z, 5)
self.assertEquals(defaults.cat, 'test.cat')
def testPassConfig(self):
config = sampleFunction.defaults
config.x = 2
config.y = 3
config.z = 1
cat, num = sampleFunction(config = config)
self.assertEquals(cat, 'test.cat')
self.assertEquals(7, num)
def testDefaultsImmutable(self):
default = sampleFunction.defaults
originalX = default.x
default.x = 17
self.assertEquals(originalX, sampleFunction.defaults.x)
def testPassConfigAndArgs(self):
config = sampleFunction.defaults
config.x = 15
originalY = config.y
(cat, num), newConfig = sampleFunction(y = 2, config=config, dumpConfig=True)
self.assertEquals(cat, 'test.cat')
self.assertEquals(num, 35)
self.assertEquals(originalY, config.y)
config.y = 2
self.assertEquals(config, newConfig)
def testPassTooMuchConfig(self):
config = sampleFunction.defaults
config.x = 2
config.y = 3
config.z = 0
config.extra = 77
(cat, num), newConfig = sampleFunction(config=config, dumpConfig=True)
self.assertEquals(cat, 'test.cat')
self.assertEquals(num, 6)
self.assertEquals(newConfig, config)
def testPassDictAsConfig(self):
myconfig = {}
myconfig['x'] = 2
myconfig['y'] = 3
myconfig['z'] = 4
myconfig['cat'] = 'mycat'
myconfig['extra'] = 88
myconfig['func'] = eval1
(cat, num), newConfig = sampleFunction(config=myconfig, dumpConfig=True)
self.assertEquals(cat, 'mycat')
self.assertEquals(num, 10)
self.assertEquals(type(newConfig), type(sampleFunction.defaults))
self.assertEquals(newConfig.x, 2)
self.assertEquals(newConfig.y, 3)
self.assertEquals(newConfig.z, 4)
self.assertEquals(newConfig.cat, 'mycat')
self.assertEquals(newConfig.extra, 88)
class TestManageConfigsWithFiles(unittest.TestCase):
def setUp(self):
self.configFile = 'manageconfigs.test.config'
output = open(self.configFile, 'w')
output.write('''# Config File for Testing manageconfigs.py
x=5
y=7
z=1
cat='mytest.cat'
#This is for a different program
extra=99
''')
self.outputConfig = 'manageconfigs.test2.config'
def tearDown(self):
if os.path.exists(self.configFile):
os.remove(self.configFile)
if os.path.exists(self.outputConfig):
os.remove(self.outputConfig)
def testReadFileConfig(self):
config = sampleFunction.loadConfig(self.configFile)
self.assertEquals(config.x, 5)
self.assertEquals(config.y, 7)
self.assertEquals(config.z, 1)
self.assertEquals(config.cat, 'mytest.cat')
self.assertEquals(config.extra, 99)
def testWriteConfigToFile(self):
config=sampleFunction.defaults
config.toFile(self.outputConfig)
self.assertTrue(os.path.exists(self.outputConfig))
newconfig = sampleFunction.loadConfig(self.outputConfig)
self.assertEquals(config, newconfig)
def testCallWithConfigFile(self):
cat, num = sampleFunction(x=2, config=self.configFile)
self.assertEquals(num, 15)
self.assertEquals(cat, 'mytest.cat')
def testDumpConfigToFile(self):
cat, num = sampleFunction(x=2, y=15, dumpConfig=self.outputConfig)
expectedConfig = sampleFunction.defaults
expectedConfig.x = 2
expectedConfig.y = 15
self.assertEquals(cat, 'test.cat')
self.assertEquals(num, 35)
config = sampleFunction.loadConfig(self.outputConfig)
self.assertEquals(expectedConfig, config)
class TestManageConfigsCommandLine(unittest.TestCase):
def setUp(self):
self.configFile = 'manageconfigs.test.config'
self.dumpFile = 'manageconfigs.dump.config'
output = open(self.configFile, 'w')
output.write('''# Config File for Testing manageconfigs.py
x=5
y=7
z=1
cat='mytest.cat'
#This is for a different program
extra=99
''')
self.stdout = sys.stdout
def tearDown(self):
if os.path.exists(self.configFile):
os.remove(self.configFile)
if os.path.exists(self.dumpFile):
os.remove(self.dumpFile)
sys.stdout = self.stdout
def testAddConfigManagementToParser(self):
parser = OptionParser()
sampleFunction.addConfigToParser(parser)
self.assertTrue(parser.has_option('-c'))
self.assertTrue(parser.has_option('--config'))
parser.parse_args(args=['-c', self.configFile])
self.assertTrue(isinstance(parser.config, mc.Configuration))
def testDumpDefault(self):
parser = OptionParser()
sampleFunction.addConfigToParser(parser)
self.assertTrue(parser.has_option('-d'))
self.assertTrue(parser.has_option('--dump'))
dumpfile = open(self.dumpFile, 'w')
sys.stdout = dumpfile
self.assertRaises(SystemExit, parser.parse_args, args=['-d'])
sys.stdout = self.stdout
dumpfile.close()
self.assertEquals(sampleFunction.loadConfig(self.dumpFile), sampleFunction.defaults)
########################
class TestManageCommandLine(unittest.TestCase):
def setUp(self):
self.configFile = 'manageconfigs.test.config'
output = open(self.configFile, 'w')
output.write('''# Config File for Testing manageconfigs.py
x=5
y=7
z=1
cat='mytest.cat'
#This is for a different program
extra=99
''')
        self.outputConfig = 'manageconfigs.test2.config'
        self.dumpFile = 'manageconfigs.dump.config'
        self.stdout = sys.stdout
    def tearDown(self):
        if os.path.exists(self.configFile):
            os.remove(self.configFile)
        if os.path.exists(self.outputConfig):
            os.remove(self.outputConfig)
        if os.path.exists(self.dumpFile):
            os.remove(self.dumpFile)
        sys.stdout = self.stdout
def testPopulateParser(self):
parser = OptionParser()
sampleFunction2.populateParser(parser)
self.assertTrue(parser.has_option('-d'))
self.assertTrue(parser.has_option('--dump'))
dumpfile = open(self.dumpFile, 'w')
sys.stdout = dumpfile
self.assertRaises(SystemExit, parser.parse_args, args=['-d'])
sys.stdout = self.stdout
dumpfile.close()
        self.assertEquals(sampleFunction2.loadConfig(self.dumpFile), sampleFunction2.defaults)
argstring = '-x 3 -y 2.66 --func eval2 --cat mytest.cat --verbose False'
self.assertTrue(parser.has_option('--cat'))
self.assertTrue(parser.has_option('-v'))
        self.assertTrue(parser.has_option('--verbose'))
########################
def test():
testcases = [TestManageConfigsFromPython, TestManageConfigsWithFiles, TestManageConfigsCommandLine, TestManageCommandLine]
suite = unittest.TestSuite(map(unittest.TestLoader().loadTestsFromTestCase,
testcases))
unittest.TextTestRunner(verbosity=2).run(suite)
########################
if __name__ == '__main__':
test()
| {
"content_hash": "91147e7892cc833ccb883dd53b332880",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 126,
"avg_line_length": 25.184971098265898,
"alnum_prop": 0.617282533853569,
"repo_name": "deapplegate/wtgpipeline",
"id": "056a02dbdd8d60d5ee286aa0493b5216319e304d",
"size": "8736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manageconfigs_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "183"
},
{
"name": "C",
"bytes": "7161"
},
{
"name": "C++",
"bytes": "65083"
},
{
"name": "Makefile",
"bytes": "2574"
},
{
"name": "Perl",
"bytes": "38992"
},
{
"name": "Python",
"bytes": "13671330"
},
{
"name": "Roff",
"bytes": "48622"
},
{
"name": "Shell",
"bytes": "3637313"
},
{
"name": "XSLT",
"bytes": "54208"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django import forms
from django.contrib.auth.models import User
from mezzanine.core.admin import TabularDynamicInlineAdmin, SingletonAdmin
from mezzanine.pages.admin import PageAdmin
from models import SiteConfiguration, HomePage, IconBox, UserQuota, QuotaMessage
class IconBoxInline(TabularDynamicInlineAdmin):
model = IconBox
extra = 5
max_num = 5
class HomePageAdmin(PageAdmin):
inlines = (IconBoxInline,)
class UserQuotaForm(forms.ModelForm):
user = forms.ModelChoiceField(
queryset=User.objects.exclude(is_superuser=True).exclude(is_active=False))
class Meta:
model = UserQuota
fields = ['user', 'allocated_value', 'used_value', 'unit', 'zone']
def save(self, *args, **kwargs):
instance = super(UserQuotaForm, self).save(commit=False)
instance.user = self.cleaned_data['user']
return instance
class QuotaAdmin(admin.ModelAdmin):
model = UserQuota
list_display = ('user', 'allocated_value', 'used_value', 'unit', 'zone')
list_filter = ('zone', 'user__username', )
def get_form(self, request, obj=None, **kwargs):
# use a customized form class when adding a UserQuota object so that
# the foreign key user field is available for selection.
if obj is None:
return UserQuotaForm
else:
return super(QuotaAdmin, self).get_form(request, obj, **kwargs)
admin.site.register(HomePage, HomePageAdmin)
admin.site.register(SiteConfiguration, SingletonAdmin)
admin.site.register(UserQuota, QuotaAdmin)
admin.site.register(QuotaMessage, SingletonAdmin)
| {
"content_hash": "5d24af347ec34ea4eefbdb497daa9180",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 82,
"avg_line_length": 31.037735849056602,
"alnum_prop": 0.7082066869300911,
"repo_name": "ResearchSoftwareInstitute/MyHPOM",
"id": "2ed01a3f01d11016c5befb6367ac07553c35ff70",
"size": "1645",
"binary": false,
"copies": "1",
"ref": "refs/heads/myhpom-develop",
"path": "theme/admin.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "399181"
},
{
"name": "HTML",
"bytes": "950570"
},
{
"name": "JavaScript",
"bytes": "2069460"
},
{
"name": "Python",
"bytes": "5006675"
},
{
"name": "R",
"bytes": "4463"
},
{
"name": "Shell",
"bytes": "53077"
},
{
"name": "XSLT",
"bytes": "790987"
}
],
"symlink_target": ""
} |
"""This file contains code for use with "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
from thinkbayes2 import Suite
class M_and_M(Suite):
"""Map from hypothesis (A or B) to probability."""
mix94 = dict(brown=30,
yellow=20,
red=20,
green=10,
orange=10,
tan=10,
blue=0)
mix96 = dict(blue=24,
green=20,
orange=16,
yellow=14,
red=13,
brown=13,
tan=0)
hypoA = dict(bag1=mix94, bag2=mix96)
hypoB = dict(bag1=mix96, bag2=mix94)
hypotheses = dict(A=hypoA, B=hypoB)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: string hypothesis (A or B)
data: tuple of string bag, string color
"""
bag, color = data
mix = self.hypotheses[hypo][bag]
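        # e.g. data ('bag1', 'yellow') under hypo 'A' selects mix94, giving
        # an unnormalized likelihood of 20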
like = mix[color]
return like
def main():
suite = M_and_M('AB')
suite.Update(('bag1', 'yellow'))
suite.Update(('bag2', 'green'))
suite.Print()
if __name__ == '__main__':
main()
| {
"content_hash": "d174f2020078ff4945ca6760adbb0a5d",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 68,
"avg_line_length": 22.89830508474576,
"alnum_prop": 0.533678756476684,
"repo_name": "AllenDowney/ThinkBayes2",
"id": "25482cd62624a966df02882c2c55934e0963177d",
"size": "1351",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/m_and_m.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1684"
},
{
"name": "HTML",
"bytes": "370933"
},
{
"name": "Jupyter Notebook",
"bytes": "11505123"
},
{
"name": "Makefile",
"bytes": "3948"
},
{
"name": "Python",
"bytes": "247283"
},
{
"name": "Stata",
"bytes": "46787"
},
{
"name": "TeX",
"bytes": "390687"
}
],
"symlink_target": ""
} |
import antlr
import LexedParser
import LexedLexer
from iterablefromtokenstream import IterableFromTokenStream
class PepperTokenStreamFromFile( antlr.TokenStream, IterableFromTokenStream ):
def __init__( self, fl ):
self.lexed_parser = LexedParser.Parser(
LexedLexer.Lexer( fl ) )
def nextToken( self ):
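        # return the next parsed line, or an EOF token once input is exhausted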
ln = self.lexed_parser.line()
if ln is None:
return antlr.CommonToken( type = antlr.Token.EOF_TYPE )
else:
return ln
| {
"content_hash": "2c9094e7c78e7ba23baab65a04f58597",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 26.36842105263158,
"alnum_prop": 0.6646706586826348,
"repo_name": "andybalaam/pepper",
"id": "580363b094bd306f6c8b8552dfa030d40c11a6fa",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old/pepper1/src/parse/peppertokenstreamfromfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5336"
},
{
"name": "CSS",
"bytes": "912"
},
{
"name": "GAP",
"bytes": "22508"
},
{
"name": "HTML",
"bytes": "1300"
},
{
"name": "Haskell",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5175"
},
{
"name": "Python",
"bytes": "570994"
},
{
"name": "Rust",
"bytes": "24407"
},
{
"name": "Shell",
"bytes": "728"
}
],
"symlink_target": ""
} |
"""Basic test of tone synthesis with `muser.live.Synth`"""
import muser.live as live
import math
import time
synth = live.Synth(channels=2)
def synth_on(synth, duration, pause=0):
"""Activate synth's tone generation for a time, then pause."""
synth.toggle()
time.sleep(duration)
synth.toggle()
time.sleep(pause)
def sine_tone(amp, freq, phase=0):
"""Returns a sinusoidal function of time."""
def tone(t):
return amp * math.sin(2 * math.pi * freq * t + phase)
return tone
synth.activate()
synth.connect(synth.outports[0], 'system:playback_1')
synth.connect(synth.outports[1], 'system:playback_2')
# add a 440 Hz tone and play it twice for 1 s, with a 0.5 s pause between plays
synth.add_synth_function(sine_tone(0.75, 440))
for i in range(2):
synth_on(synth, duration=1, pause=0.5)
# repeat, adding a tone an octave below
synth.add_synth_function(sine_tone(0.75, 220))
for i in range(2):
synth_on(synth, duration=1, pause=0.5)
synth.deactivate()
synth.outports.clear()
synth.close()
| {
"content_hash": "e764f3638b0c4b35dcf9d21363c89620",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 68,
"avg_line_length": 27.026315789473685,
"alnum_prop": 0.688412852969815,
"repo_name": "laporte-m/muser",
"id": "3214d68fa38d19f38614b0c9ae58e997676b1446",
"size": "1027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/synth_tone.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74925"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model(['Logit'], ['Lag1Trend'], ['BestCycle'], ['MLP']) | {
"content_hash": "537eb17aec6aa51e80287f1122836698",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 75,
"avg_line_length": 37,
"alnum_prop": 0.6959459459459459,
"repo_name": "antoinecarme/pyaf",
"id": "e82818715f07add59aa63fd0c3c435a1372270df",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_Lag1Trend_BestCycle_MLP.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from pyravendb.connection.requests_factory import HttpRequestsFactory
from pyravendb.custom_exceptions import exceptions
from pyravendb.d_commands import database_commands
from pyravendb.data.document_convention import DocumentConvention
from pyravendb.hilo.hilo_generator import HiloGenerator
from pyravendb.data.database import DatabaseDocument
from pyravendb.store.document_session import documentsession
from pyravendb.tools.utils import Utils
from pyravendb.data.operations import Operations
import traceback
import uuid
class documentstore(object):
def __init__(self, url=None, database=None, api_key=None):
self.url = url
self.database = database
self.conventions = DocumentConvention()
self.api_key = api_key
self._requests_handler = HttpRequestsFactory(url, database, self.conventions, api_key=self.api_key)
self._database_commands = None
self._initialize = False
self.generator = None
self._operations = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@property
def operations(self):
self._assert_initialize()
return self._operations
@property
def database_commands(self):
self._assert_initialize()
return self._database_commands
def initialize(self):
if not self._initialize:
self._operations = Operations(self._requests_handler)
self._database_commands = database_commands.DatabaseCommands(self._requests_handler)
if self.database is None:
raise exceptions.InvalidOperationException("None database is not valid")
if not self.database.lower() == self.conventions.system_database:
path = "Raven/Databases/{0}".format(self.database)
response = self._requests_handler.check_database_exists("docs?id=" + Utils.quote_key(path))
                # ensure the database exists; if it doesn't, log the error and
                # create a new one
                if response.status_code == 404:
                    try:
                        raise exceptions.ErrorResponseException(
                            "Could not open database named: {0}, database does not exist".format(self.database))
except exceptions.ErrorResponseException:
print(traceback.format_exc())
self._database_commands.admin_commands.create_database(
DatabaseDocument(self.database, {"Raven/DataDir": "~\\{0}".format(self.database)}))
self._requests_handler.get_replication_topology()
self.generator = HiloGenerator(self.conventions.max_ids_to_catch, self._database_commands)
self._initialize = True
def _assert_initialize(self):
if not self._initialize:
raise exceptions.InvalidOperationException(
"You cannot open a session or access the database commands before initializing the document store.\
Did you forget calling initialize()?")
def open_session(self, database=None, api_key=None, force_read_from_master=False):
self._assert_initialize()
session_id = uuid.uuid4()
database_commands_for_session = self._database_commands
if database is not None:
requests_handler = HttpRequestsFactory(self.url, database, self.conventions, force_get_topology=True,
api_key=api_key)
path = "Raven/Databases/{0}".format(database)
response = requests_handler.check_database_exists("docs?id=" + Utils.quote_key(path))
if response.status_code != 200:
raise exceptions.ErrorResponseException("Could not open database named:{0}".format(database))
database_commands_for_session = database_commands.DatabaseCommands(requests_handler)
return documentsession(database, self, database_commands_for_session, session_id, force_read_from_master)
def generate_id(self, entity):
return self.generator.generate_document_id(entity, self.conventions, self._requests_handler)
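# Editor's sketch (hypothetical server URL and database name): the intended
# lifecycle of a documentstore -- construct it, call initialize() once, then
# open sessions as needed.
#
#     with documentstore(url="http://localhost:8080", database="Northwind") as store:
#         store.initialize()
#         session = store.open_session()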
| {
"content_hash": "3d6e6d05a22a1945b89627f05e4dc489",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 115,
"avg_line_length": 48.92941176470588,
"alnum_prop": 0.6576100024044241,
"repo_name": "IdanHaim/RavenDB-Python-Client",
"id": "53b4613fea95d97b13c7d0716c46ee376da9675d",
"size": "4159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyravendb/store/document_store.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "150420"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import torch
import pyro.distributions as dist
from pyro import poutine
from pyro.distributions.util import sum_leftmost
from pyro.poutine.messenger import Messenger
from pyro.poutine.util import site_is_subsample
def _make_cls(base, static_attrs, instance_attrs, parent_linkage=None):
r"""
Dynamically create classes named `_ + base.__name__`, which extend the
base class with other optional instance and class attributes, and have
a custom `.expand` method to propagate these attributes on expanded
instances.
:param cls base: Base class.
:param dict static_attrs: static attributes to add to class.
:param dict instance_attrs: instance attributes for initialization.
:param str parent_linkage: attribute in the parent class that holds
a reference to the distribution class.
:return cls: dynamically generated class.
"""
def _expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(cls, _instance)
for attr in instance_attrs:
setattr(new, attr, getattr(self, attr))
if parent_linkage:
setattr(new.parent, parent_linkage, new)
return base.expand(self, batch_shape, _instance=new)
name = "_" + base.__name__
cls = type(name, (base,), instance_attrs)
for k, v in static_attrs.items():
setattr(cls, k, v)
cls.expand = _expand
return cls
def _latent(base, parent):
return _make_cls(
base, {"collapsible": True}, {"site_name": None, "parent": parent}, "_latent"
)
def _conditional(base, parent):
return _make_cls(
base, {"marginalize_latent": True}, {"parent": parent}, "_conditional"
)
def _compound(base, parent):
return _make_cls(base, {}, {"parent": parent})
class BetaBinomialPair:
def __init__(self):
self._latent = None
self._conditional = None
def latent(self, *args, **kwargs):
self._latent = _latent(dist.Beta, parent=self)(*args, **kwargs)
return self._latent
def conditional(self, *args, **kwargs):
self._conditional = _conditional(dist.Binomial, parent=self)(*args, **kwargs)
return self._conditional
def posterior(self, obs):
concentration1 = self._latent.concentration1
concentration0 = self._latent.concentration0
total_count = self._conditional.total_count
reduce_dims = len(obs.size()) - len(concentration1.size())
# Unexpand total_count to have the same shape as concentration0.
# Raise exception if this isn't possible.
total_count = sum_leftmost(total_count, reduce_dims)
summed_obs = sum_leftmost(obs, reduce_dims)
return dist.Beta(
concentration1 + summed_obs,
total_count + concentration0 - summed_obs,
validate_args=self._latent._validate_args,
)
def compound(self):
return _compound(dist.BetaBinomial, parent=self)(
concentration1=self._latent.concentration1,
concentration0=self._latent.concentration0,
total_count=self._conditional.total_count,
)
class GammaPoissonPair:
def __init__(self):
self._latent = None
self._conditional = None
def latent(self, *args, **kwargs):
self._latent = _latent(dist.Gamma, parent=self)(*args, **kwargs)
return self._latent
def conditional(self, *args, **kwargs):
self._conditional = _conditional(dist.Poisson, parent=self)(*args, **kwargs)
return self._conditional
def posterior(self, obs):
concentration = self._latent.concentration
rate = self._latent.rate
reduce_dims = len(obs.size()) - len(rate.size())
num_obs = obs.shape[:reduce_dims].numel()
summed_obs = sum_leftmost(obs, reduce_dims)
return dist.Gamma(concentration + summed_obs, rate + num_obs)
def compound(self):
return _compound(dist.GammaPoisson, parent=self)(
concentration=self._latent.concentration, rate=self._latent.rate
)
class UncollapseConjugateMessenger(Messenger):
r"""
Replay regular sample sites in addition to uncollapsing any collapsed
conjugate sites.
"""
def __init__(self, trace):
"""
:param trace: a trace whose values should be reused
Constructor.
Stores trace in an attribute.
"""
self.trace = trace
super().__init__()
def _pyro_sample(self, msg):
is_collapsible = getattr(msg["fn"], "collapsible", False)
# uncollapse conjugate sites.
if is_collapsible:
conj_node, parent = None, None
for site_name in self.trace.observation_nodes + self.trace.stochastic_nodes:
parent = getattr(self.trace.nodes[site_name]["fn"], "parent", None)
if parent is not None and parent._latent.site_name == msg["name"]:
conj_node = self.trace.nodes[site_name]
break
assert (
conj_node is not None
), "Collapsible latent site `{}` with no corresponding conjugate site.".format(
msg["name"]
)
msg["fn"] = parent.posterior(conj_node["value"])
msg["value"] = msg["fn"].sample()
# regular replay behavior.
else:
name = msg["name"]
if name in self.trace:
guide_msg = self.trace.nodes[name]
if msg["is_observed"]:
return None
if guide_msg["type"] != "sample":
raise RuntimeError("site {} must be sample in trace".format(name))
msg["done"] = True
msg["value"] = guide_msg["value"]
msg["infer"] = guide_msg["infer"]
def uncollapse_conjugate(fn=None, trace=None):
r"""
    This is similar to the :func:`~pyro.poutine.replay` poutine, but in addition to
    replaying the values at sample sites from the ``trace`` in the original callable
    ``fn`` when the same sites are sampled, this also "uncollapses" any observed
    compound distributions (defined in :mod:`pyro.distributions.conjugate`)
    by sampling the originally collapsed parameter values from its posterior distribution
    followed by observing the data with the sampled parameter values.
"""
msngr = UncollapseConjugateMessenger(trace)
return msngr(fn) if fn is not None else msngr
class CollapseConjugateMessenger(Messenger):
def _pyro_sample(self, msg):
is_collapsible = getattr(msg["fn"], "collapsible", False)
marginalize_latent = getattr(msg["fn"], "marginalize_latent", False)
if is_collapsible:
msg["fn"].site_name = msg["name"]
msg["stop"] = True
elif marginalize_latent:
msg["fn"] = msg["fn"].parent.compound()
else:
return
def collapse_conjugate(fn=None):
r"""
This replaces a latent-observed pair by collapsing the latent site
(whose distribution has attribute `collapsible=True`), and replacing the
observed site (whose distribution has attribute `marginalize_latent=True`)
with a compound probability distribution that marginalizes out the latent
site.
"""
msngr = CollapseConjugateMessenger()
return msngr(fn) if fn is not None else msngr
def posterior_replay(model, posterior_samples, *args, **kwargs):
r"""
Given a model and samples from the posterior (potentially with conjugate sites
collapsed), return a `dict` of samples from the posterior with conjugate sites
uncollapsed. Note that this can also be used to generate samples from the
posterior predictive distribution.
:param model: Python callable.
:param dict posterior_samples: posterior samples keyed by site name.
:param args: arguments to `model`.
:param kwargs: keyword arguments to `model`.
:return: `dict` of samples from the posterior.
"""
posterior_samples = posterior_samples.copy()
num_samples = kwargs.pop("num_samples", None)
assert (
posterior_samples or num_samples
), "`num_samples` must be provided if `posterior_samples` is empty."
if num_samples is None:
num_samples = list(posterior_samples.values())[0].shape[0]
return_samples = defaultdict(list)
for i in range(num_samples):
conditioned_nodes = {k: v[i] for k, v in posterior_samples.items()}
collapsed_trace = poutine.trace(
poutine.condition(collapse_conjugate(model), conditioned_nodes)
).get_trace(*args, **kwargs)
trace = poutine.trace(uncollapse_conjugate(model, collapsed_trace)).get_trace(
*args, **kwargs
)
for name, site in trace.iter_stochastic_nodes():
if not site_is_subsample(site):
return_samples[name].append(site["value"])
return {k: torch.stack(v) for k, v in return_samples.items()}
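if __name__ == "__main__":
    # Editor's sketch, not part of the module API: exercises the exact
    # conjugate update in ``BetaBinomialPair.posterior`` in isolation.
    # The Beta(2, 2) prior, the 20 trials of Binomial(10, 0.5), and all
    # other numbers below are illustrative assumptions.
    pair = BetaBinomialPair()
    pair.latent(torch.tensor(2.0), torch.tensor(2.0))  # registers the prior
    conditional = pair.conditional(
        total_count=torch.full((20,), 10.0), probs=torch.full((20,), 0.5)
    )
    obs = conditional.sample()  # 20 binomial draws
    posterior = pair.posterior(obs)  # Beta(2 + sum(obs), 202 - sum(obs))
    print(posterior.concentration1, posterior.concentration0)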
| {
"content_hash": "37e8df10b3c2ab326b2669324784f936",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 91,
"avg_line_length": 37.27800829875519,
"alnum_prop": 0.6342386464826358,
"repo_name": "uber/pyro",
"id": "e1332eb652c26bf88f94e941116bbdc2fefc6cb6",
"size": "9073",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pyro/contrib/conjugate/infer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "6121"
},
{
"name": "CSS",
"bytes": "478"
},
{
"name": "Dockerfile",
"bytes": "1635"
},
{
"name": "Makefile",
"bytes": "6857"
},
{
"name": "Python",
"bytes": "3388193"
},
{
"name": "Shell",
"bytes": "6465"
},
{
"name": "TeX",
"bytes": "3649"
}
],
"symlink_target": ""
} |
import sys
import os
class Path:
"""Implementation of the PATH programming language in Python."""
# This class uses the following data attributes:
# x: current x position
# y: current y position
# p: current memory cell pointer
# d: current direction
# s: whether the next cell is being skipped
# mem: array of memory cells
# prog: 2-dimensional array of characters that make up the program
# plugins: array of plug-in objects for the interpreter to use
# func_in: function to use for input
# func_out: function to use for output
# plug_lock: plugin the interpreter is locked on
# verbose: if true, enable debug messages
def __init__(self):
"""Initialize the class."""
self.PATH_DIRECTION_RIGHT = 1
self.PATH_DIRECTION_DOWN = 2
self.PATH_DIRECTION_LEFT = 3
self.PATH_DIRECTION_UP = 4
self.func_in = sys.stdin.read
self.func_out = sys.stdout.write
self.plug_lock = None
self.plugins = []
    def __add__(self, x):
        """Step through x symbols. Return True if end of program encountered."""
        for i in range(x):
            if self.step():
                return True
        return False
def addplugin(self, plugin):
"""This method is called internally by plugins.
(To add a plugin to the interpreter: 'execfile(plugin, {"glob_path":prog})') """
self.plugins.append(plugin)
def debug(self, msg):
"""Print a debug message."""
if self.verbose:
self.errprint("({},{}) {}".format(self.x, self.y, msg))
def dir2string(self, d):
"""Get the string representation of a direction id."""
if d == self.PATH_DIRECTION_RIGHT:
return "right"
elif d == self.PATH_DIRECTION_DOWN:
return "down"
elif d == self.PATH_DIRECTION_LEFT:
return "left"
elif d == self.PATH_DIRECTION_UP:
return "up"
def errprint(self, msg):
"""Print a message to stderr."""
sys.stderr.write(os.path.basename(sys.argv[0]) + ": " + msg + "\n")
def normalize_line_length(self):
if any(len(l) for l in self.prog):
longest = max(len(l) for l in self.prog)
for l in range(len(self.prog)):
# "{:{}}" works as a format string too, but I prefer
# using explicit indices when nesting fields
self.prog[l] = '{0:{1}}'.format(self.prog[l], longest)
def load_prog_file(self, filename):
"""Load a new program file into the interpreter."""
try:
with open(filename) as f:
self.prog = f.readlines()
if self.prog[0].startswith("#!"):
self.prog = self.prog[1:]
self.normalize_line_length()
except IOError:
self.errprint("can't open file '{}'".format(filename))
sys.exit(1)
self.reset()
def load_prog_array(self, progarray):
"""Load a new program directly into the interpreter."""
self.prog = progarray
self.normalize_line_length()
self.reset()
def lock(self, plugin):
"""Lock the interpreter on a specific plugin. (Use path.lock(None) to unlock.)"""
self.plug_lock = plugin
def redefine_io(self, infunc, outfunc):
"""Redefine the input and output functions used by the , and . symbols.
(Defaults are sys.stdin.read for input and sys.stdout.write for output."""
self.func_in = infunc
self.func_out = outfunc
def reset(self):
"""Reset the program state and restart the program."""
self.x = 0
self.y = 0
self.p = 0
self.d = self.PATH_DIRECTION_RIGHT
self.s = False
self.mem = [0]
self.verbose = False
for ny in range(len(self.prog)):
for nx in range(len(self.prog[ny])):
if self.prog[ny][nx] == '$':
self.y = ny
self.x = nx
def run(self):
"""Run the entire program."""
while not self.step():
pass
def runplugins(self):
"""Run all the loaded plugins on the current symbol."""
for plugin in self.plugins:
if not plugin.call(self):
return False
return True
def step(self):
"""
Step through a single symbol of the program.
Returns True if end of program encountered, False if
another step should be executed.
"""
cursym = self.prog[self.y][self.x]
if self.s:
self.s = False
elif self.plug_lock is not None:
self.plug_lock.call(self)
elif not self.runplugins():
pass
elif cursym == '$':
self.debug("Start")
elif cursym == '#':
self.debug("End")
return True
elif cursym == '!':
self.s = True
self.debug("Skip next symbol")
elif cursym == '}':
self.p += 1
if self.p > len(self.mem) - 1:
self.mem.append(0)
self.debug("New memory cell: {}".format(self.p))
elif cursym == '{':
if self.p > 0:
self.p -= 1
self.debug("New memory cell: {}".format(self.p))
elif cursym == '/':
if self.d == self.PATH_DIRECTION_RIGHT:
self.d = self.PATH_DIRECTION_UP
elif self.d == self.PATH_DIRECTION_DOWN:
self.d = self.PATH_DIRECTION_LEFT
elif self.d == self.PATH_DIRECTION_LEFT:
self.d = self.PATH_DIRECTION_DOWN
elif self.d == self.PATH_DIRECTION_UP:
self.d = self.PATH_DIRECTION_RIGHT
self.debug("New direction: {}".format(self.dir2string(self.d)))
elif cursym == '\\':
if self.d == self.PATH_DIRECTION_RIGHT:
self.d = self.PATH_DIRECTION_DOWN
elif self.d == self.PATH_DIRECTION_DOWN:
self.d = self.PATH_DIRECTION_RIGHT
elif self.d == self.PATH_DIRECTION_LEFT:
self.d = self.PATH_DIRECTION_UP
elif self.d == self.PATH_DIRECTION_UP:
self.d = self.PATH_DIRECTION_LEFT
self.debug("New direction: {}".format(self.dir2string(self.d)))
elif cursym == '>':
if self.mem[self.p] != 0:
self.d = self.PATH_DIRECTION_RIGHT
self.debug("New direction: {}".format(self.dir2string(self.d)))
elif cursym == 'v':
if self.mem[self.p] != 0:
self.d = self.PATH_DIRECTION_DOWN
self.debug("New direction: {}".format(self.dir2string(self.d)))
elif cursym == '<':
if self.mem[self.p] != 0:
self.d = self.PATH_DIRECTION_LEFT
self.debug("New direction: {}".format(self.dir2string(self.d)))
elif cursym == '^':
if self.mem[self.p] != 0:
self.d = self.PATH_DIRECTION_UP
self.debug("New direction: {}".format(self.dir2string(self.d)))
elif cursym == '+':
self.mem[self.p] += 1
if self.mem[self.p] == 256:
self.mem[self.p] = 0
self.debug("Incremented memory cell {} to {}".format(self.p, self.mem[self.p]))
elif cursym == '-':
self.mem[self.p] -= 1
if self.mem[self.p] == -1:
self.mem[self.p] = 255
self.debug("Decremented memory cell {} to {}".format(self.p, self.mem[self.p]))
elif cursym == ',':
self.mem[self.p] = ord(self.func_in(1))
self.debug("Inputted {} to memory cell {}".format(self.mem[self.p], self.p))
elif cursym == '.':
self.func_out(chr(self.mem[self.p]))
self.debug("Outputted {} from memory cell {}".format(self.mem[self.p], self.p))
if self.d == self.PATH_DIRECTION_RIGHT:
self.x += 1
elif self.d == self.PATH_DIRECTION_DOWN:
self.y += 1
elif self.d == self.PATH_DIRECTION_LEFT:
self.x -= 1
elif self.d == self.PATH_DIRECTION_UP:
self.y -= 1
try:
self.prog[self.y][self.x]
if self.x < 0:
raise IndexError
if self.y < 0:
raise IndexError
except IndexError:
self.debug("Ran off the side")
return True
return False
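if __name__ == "__main__":
    # Editor's sketch: run a minimal PATH program. "$" marks the start,
    # each "+" increments the current memory cell, and "#" halts.
    interp = Path()
    interp.load_prog_array(["$+++#"])
    interp.run()
    print(interp.mem[0])  # expected: 3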
| {
"content_hash": "61b4f3bd306080222fbbc451310fd07b",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 91,
"avg_line_length": 35.90376569037657,
"alnum_prop": 0.5280270364759352,
"repo_name": "mruffalo/path",
"id": "52730e6f1dd262aede8560ada89b1a567dcfc07a",
"size": "9698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pathlang.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18655"
}
],
"symlink_target": ""
} |
from flask import Flask
from bugsnag.flask import handle_exceptions
import bugsnag.notification
from tests.utils import IntegrationTest
class SentinelError(RuntimeError):
pass
class TestFlask(IntegrationTest):
def setUp(self):
super(TestFlask, self).setUp()
bugsnag.configure(use_ssl=False,
endpoint=self.server.address,
api_key='3874876376238728937',
notify_release_stages=['dev'],
release_stage='dev',
asynchronous=False)
def test_bugsnag_middleware_working(self):
app = Flask("bugsnag")
@app.route("/hello")
def hello():
return "OK"
handle_exceptions(app)
resp = app.test_client().get('/hello')
self.assertEqual(resp.data, b'OK')
self.assertEqual(0, len(self.server.received))
def test_bugsnag_crash(self):
app = Flask("bugsnag")
@app.route("/hello")
def hello():
raise SentinelError("oops")
handle_exceptions(app)
app.test_client().get('/hello')
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
self.assertEqual(payload['events'][0]['exceptions'][0]['errorClass'],
'test_flask.SentinelError')
self.assertEqual(payload['events'][0]['metaData']['request']['url'],
'http://localhost/hello')
def test_bugsnag_notify(self):
app = Flask("bugsnag")
@app.route("/hello")
def hello():
bugsnag.notify(SentinelError("oops"))
return "OK"
handle_exceptions(app)
app.test_client().get('/hello')
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
self.assertEqual(payload['events'][0]['metaData']['request']['url'],
'http://localhost/hello')
def test_bugsnag_custom_data(self):
meta_data = [{"hello": {"world": "once"}},
{"again": {"hello": "world"}}]
app = Flask("bugsnag")
@app.route("/hello")
def hello():
bugsnag.configure_request(meta_data=meta_data.pop())
raise SentinelError("oops")
handle_exceptions(app)
with app.test_client() as client:
client.get('/hello')
client.get('/hello')
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertEqual(event['metaData'].get('hello'), None)
self.assertEqual(event['metaData']['again']['hello'], 'world')
payload = self.server.received[1]['json_body']
event = payload['events'][0]
self.assertEqual(event['metaData']['hello']['world'], 'once')
self.assertEqual(event['metaData'].get('again'), None)
self.assertEqual(2, len(self.server.received))
def test_bugsnag_includes_posted_json_data(self):
app = Flask("bugsnag")
@app.route("/ajax", methods=["POST"])
def hello():
raise SentinelError("oops")
handle_exceptions(app)
app.test_client().post(
'/ajax', data='{"key": "value"}', content_type='application/json')
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertEqual(event['exceptions'][0]['errorClass'],
'test_flask.SentinelError')
self.assertEqual(event['metaData']['request']['url'],
'http://localhost/ajax')
self.assertEqual(event['metaData']['request']['data'],
dict(key='value'))
def test_bugsnag_add_metadata_tab(self):
app = Flask("bugsnag")
@app.route("/form", methods=["PUT"])
def hello():
bugsnag.add_metadata_tab("account", {"id": 1, "premium": True})
bugsnag.add_metadata_tab("account", {"premium": False})
raise SentinelError("oops")
handle_exceptions(app)
app.test_client().put(
'/form', data='_data', content_type='application/octet-stream')
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertEqual(event['metaData']['account']['premium'], False)
self.assertEqual(event['metaData']['account']['id'], 1)
def test_bugsnag_includes_unknown_content_type_posted_data(self):
app = Flask("bugsnag")
@app.route("/form", methods=["PUT"])
def hello():
raise SentinelError("oops")
handle_exceptions(app)
app.test_client().put(
'/form', data='_data', content_type='application/octet-stream')
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertEqual(event['exceptions'][0]['errorClass'],
'test_flask.SentinelError')
self.assertEqual(event['metaData']['request']['url'],
'http://localhost/form')
body = event['metaData']['request']['data']['body']
self.assertTrue('_data' in body)
def test_bugsnag_notify_with_custom_context(self):
app = Flask("bugsnag")
@app.route("/hello")
def hello():
bugsnag.notify(SentinelError("oops"),
context="custom_context_notification_testing")
return "OK"
handle_exceptions(app)
app.test_client().get('/hello')
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
self.assertEqual(payload['events'][0]['context'],
'custom_context_notification_testing')
    def test_flask_integration_includes_middleware_severity(self):
app = Flask("bugsnag")
@app.route("/test")
def test():
raise SentinelError("oops")
handle_exceptions(app)
app.test_client().get("/test")
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertTrue(event['unhandled'])
self.assertEqual(event['severityReason'], {
"type": "unhandledExceptionMiddleware",
"attributes": {
"framework": "Flask"
}
})
| {
"content_hash": "64c5bc9961f1eca646b5a87063c68428",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 78,
"avg_line_length": 34.362694300518136,
"alnum_prop": 0.5604644149577804,
"repo_name": "overplumbum/bugsnag-python",
"id": "5a2fac5665fdb7848b035cac80a383feed2cd7e4",
"size": "6632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integrations/test_flask.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "409"
},
{
"name": "Python",
"bytes": "155119"
},
{
"name": "Shell",
"bytes": "186"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division
import os
import re
import random
from django.core.files.base import File, ContentFile
from django.core.files.storage import Storage, default_storage
from django.utils.functional import LazyObject, empty
from sorl.thumbnail import default
from sorl.thumbnail.conf import settings
from sorl.thumbnail.compat import (json, urlopen, urlparse, urlsplit,
quote, quote_plus,
URLError, force_unicode, encode)
from sorl.thumbnail.helpers import ThumbnailError, tokey, get_module_class, deserialize
from sorl.thumbnail.parsers import parse_geometry
url_pat = re.compile(r'^(https?|ftp):\/\/')
def serialize_image_file(image_file):
if image_file.size is None:
raise ThumbnailError('Trying to serialize an ``ImageFile`` with a '
'``None`` size.')
data = {
'name': image_file.name,
'storage': image_file.serialize_storage(),
'size': image_file.size,
}
return json.dumps(data)
def deserialize_image_file(s):
data = deserialize(s)
class LazyStorage(LazyObject):
def _setup(self):
self._wrapped = get_module_class(data['storage'])()
image_file = ImageFile(data['name'], LazyStorage())
image_file.set_size(data['size'])
return image_file
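# Editor's sketch (hypothetical file name; assumes a configured Django
# storage): the serialize/deserialize round trip preserves name, storage
# class and size.
#
#     image = ImageFile('photos/example.jpg')
#     image.set_size([640, 480])
#     restored = deserialize_image_file(serialize_image_file(image))
#     assert restored.size == [640, 480]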
class BaseImageFile(object):
size = []
def exists(self):
raise NotImplementedError()
@property
def width(self):
return self.size[0]
x = width
@property
def height(self):
return self.size[1]
y = height
def is_portrait(self):
return self.y > self.x
@property
def ratio(self):
return float(self.x) / float(self.y)
@property
def url(self):
raise NotImplementedError()
src = url
class ImageFile(BaseImageFile):
_size = None
def __init__(self, file_, storage=None):
if not file_:
raise ThumbnailError('File is empty.')
# figure out name
if hasattr(file_, 'name'):
self.name = file_.name
else:
self.name = force_unicode(file_)
# figure out storage
if storage is not None:
self.storage = storage
elif hasattr(file_, 'storage'):
self.storage = file_.storage
elif url_pat.match(self.name):
self.storage = UrlStorage()
else:
self.storage = default_storage
if hasattr(self.storage, 'location'):
location = self.storage.location
if not self.storage.location.endswith("/"):
location += "/"
if self.name.startswith(location):
self.name = self.name[len(location):]
def __unicode__(self):
return self.name
def exists(self):
return self.storage.exists(self.name)
def set_size(self, size=None):
# set the size if given
if size is not None:
pass
# Don't try to set the size the expensive way if it already has a
# value.
elif self._size is not None:
return
elif hasattr(self.storage, 'image_size'):
# Storage backends can implement ``image_size`` method that
# optimizes this.
size = self.storage.image_size(self.name)
else:
# This is the worst case scenario
image = default.engine.get_image(self)
size = default.engine.get_image_size(image)
self._size = list(size)
@property
def size(self):
return self._size
@property
def url(self):
return self.storage.url(self.name)
def read(self):
return self.storage.open(self.name).read()
def write(self, content):
if not isinstance(content, File):
content = ContentFile(content)
self._size = None
self.name = self.storage.save(self.name, content)
return self.name
def delete(self):
return self.storage.delete(self.name)
def serialize_storage(self):
if isinstance(self.storage, LazyObject):
# if storage is wrapped in a lazy object we need to get the real
# thing.
if self.storage._wrapped is empty:
self.storage._setup()
cls = self.storage._wrapped.__class__
else:
cls = self.storage.__class__
return '%s.%s' % (cls.__module__, cls.__name__)
@property
def key(self):
return tokey(self.name, self.serialize_storage())
def serialize(self):
return serialize_image_file(self)
class DummyImageFile(BaseImageFile):
def __init__(self, geometry_string):
self.size = parse_geometry(
geometry_string,
settings.THUMBNAIL_DUMMY_RATIO,
)
def exists(self):
return True
@property
def url(self):
        if settings.THUMBNAIL_DUMMY_SOURCE == 'placereddit':
            reddit = random.choice(getattr(settings, 'THUMBNAIL_SUBREDDITS', ['featured']))
            n = random.choice(range(10))
            if '/' in reddit:
                url = 'http://placereddit.com/%s/%s/%s/%s/'
            else:
                url = 'http://placereddit.com/r/%s/%s/%s/%s/'
            return url % (reddit, self.x, self.y, n)
return settings.THUMBNAIL_DUMMY_SOURCE % (
{'width': self.x, 'height': self.y}
)
class UrlStorage(Storage):
def normalize_url(self, url, charset='utf-8'):
url = encode(url, charset, 'ignore')
scheme, netloc, path, qs, anchor = urlsplit(url)
# Encode to utf8 to prevent urllib KeyError
path = encode(path, charset, 'ignore')
path = quote(path, '/%')
qs = quote_plus(qs, ':&%=')
return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
def open(self, name, mode='rb'):
return urlopen(self.normalize_url(name))
def exists(self, name):
try:
self.open(name)
except URLError:
return False
return True
def url(self, name):
return name
def delete(self, name):
pass
def delete_all_thumbnails():
storage = default.storage
path = os.path.join(storage.location, settings.THUMBNAIL_PREFIX)
def walk(path):
dirs, files = storage.listdir(path)
for f in files:
storage.delete(os.path.join(path, f))
for d in dirs:
directory = os.path.join(path, d)
walk(directory)
try:
full_path = storage.path(directory)
except Exception:
continue
os.rmdir(full_path)
walk(path)
| {
"content_hash": "b10d3b02743067a5c6801b9ec1c499ea",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 89,
"avg_line_length": 27.53061224489796,
"alnum_prop": 0.5774647887323944,
"repo_name": "chriscauley/sorl-thumbnail",
"id": "f66b340386d8bf094dbf8ba39693a31360f707c7",
"size": "6763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sorl/thumbnail/images.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3522"
},
{
"name": "Python",
"bytes": "137608"
},
{
"name": "Shell",
"bytes": "508"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20171201_0344'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='group',
),
migrations.AddField(
model_name='profile',
name='groups',
field=models.OneToOneField(default=0, on_delete=django.db.models.deletion.CASCADE, to='blog.Group'),
preserve_default=False,
),
migrations.AlterField(
model_name='group',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| {
"content_hash": "ba2bbfeed49ccd2ef7a1372f2e95954a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 112,
"avg_line_length": 28.06896551724138,
"alnum_prop": 0.5872235872235873,
"repo_name": "AShedko/kicktravel",
"id": "5f08ea2e9e30c34ae234ee18ee176f96d50c87b9",
"size": "887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/migrations/0003_auto_20171201_0347.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "376"
},
{
"name": "Python",
"bytes": "5832"
}
],
"symlink_target": ""
} |
import logging
from config import ACCOUNTING_MAIL_RECIPIENT, LOG_LEVEL, REDIS_URL, TIMEZONE, UPDATE_STRIPE_FEES
from datetime import datetime, timedelta
from pytz import timezone
from celery import Celery
import redis
from charges import amount_to_charge, calculate_amount_fees, charge, ChargeException
from npsp import Opportunity
from util import send_email, update_fees
zone = timezone(TIMEZONE)
celery = Celery()
log_level = logging.getLevelName(LOG_LEVEL)
root = logging.getLogger()
root.setLevel(log_level)
class Log(object):
"""
    This encapsulates sending to the console/stdout and email all in one.
"""
def __init__(self):
self.log = list()
def it(self, string):
"""
Add something to the log.
"""
logging.debug(string)
self.log.append(string)
def send(self):
"""
Send the assembled log out as an email.
"""
body = "\n".join(self.log)
recipient = ACCOUNTING_MAIL_RECIPIENT
subject = "Batch run"
send_email(body=body, recipient=recipient, subject=subject)
class AlreadyExecuting(Exception):
"""
Here to show when more than one job of the same type is running.
"""
pass
class Lock(object):
"""
Claim an exclusive lock. Using Redis.
"""
def __init__(self, key):
self.key = key
self.connection = redis.from_url(REDIS_URL)
def acquire(self):
if self.connection.get(self.key):
raise AlreadyExecuting
self.connection.setex(name=self.key, value="bar", time=1200)
def append(self, key, value):
if self.connection.get(key):
self.connection.setex(name=key, value=value, time=1200)
def release(self):
self.connection.delete(self.key)
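# Editor's sketch (illustrative key name): the intended acquire/work/release
# pattern for Lock; wrapping the work in try/finally guarantees the key is
# released even if the job raises.
#
#     lock = Lock(key="example-job-lock")
#     lock.acquire()  # raises AlreadyExecuting if another run holds the key
#     try:
#         ...  # batch work
#     finally:
#         lock.release()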
# TODO stop sending this email and just rely on Sentry and logs?
@celery.task()
def charge_cards():
lock = Lock(key="charge-cards-lock")
lock.acquire()
log = Log()
log.it("---Starting batch card job...")
    fourteen_days_ago = (datetime.now(tz=zone) - timedelta(days=14)).strftime("%Y-%m-%d")
today = datetime.now(tz=zone).strftime("%Y-%m-%d")
    opportunities = Opportunity.list(begin=fourteen_days_ago, end=today)
log.it("---Processing charges...")
log.it(f"Found {len(opportunities)} opportunities available to process.")
for opportunity in opportunities:
if not opportunity.stripe_customer_id:
continue
amount = amount_to_charge(opportunity)
log.it(
f"---- Charging ${amount} to {opportunity.stripe_customer_id} ({opportunity.name})"
)
try:
charge(opportunity)
except ChargeException as e:
logging.info("Batch charge error")
e.send_slack_notification()
log.send()
lock.release()
@celery.task()
def update_ach_charges():
lock = Lock(key='update-ach-charges-lock')
lock.acquire()
log = Log()
log.it('---Starting batch ach job...')
log.it('---Checking for status changes on ACH charges...')
three_days_ago = (datetime.now(tz=zone) - timedelta(days=3)).strftime("%Y-%m-%d")
today = datetime.now(tz=zone).strftime("%Y-%m-%d")
opportunities = Opportunity.list(begin=three_days_ago, end=today, stage_name="ACH Pending")
for opportunity in opportunities:
if not opportunity.stripe_customer_id:
continue
amount = amount_to_charge(opportunity)
log.it(
f"---- ACH Charging ${amount} to {opportunity.stripe_customer_id} ({opportunity.name})"
)
try:
charge(opportunity)
except ChargeException as e:
logging.info("ACH batch charge error")
e.send_slack_notification()
log.send()
lock.release()
@celery.task()
def save_stripe_fee():
log = Log()
log.it('---Starting batch stripe fee job...')
if UPDATE_STRIPE_FEES is False:
log.it('---Update fee is false. Get out.')
return
lock = Lock(key='save-stripe-fee-lock')
lock.acquire()
query = """
SELECT Id, Name, npe03__Amount__c, Stripe_Customer_Id__c, Stripe_Card__c, Stripe_Bank_Account__c, Card_type__c, Stripe_Payment_Type__c, Stripe_Agreed_to_pay_fees__c, Stripe_Transaction_Fee__c
FROM npe03__Recurring_Donation__c
WHERE npe03__Open_Ended_Status__c = 'Open'
AND Stripe_Transaction_Fee__c = null
AND Stripe_Customer_Id__c != ''
ORDER BY npe03__Date_Established__c DESC
LIMIT 50
"""
try:
update_fees(query, log, 'recurring')
finally:
lock.release()
if __name__ == "__main__":
charge_cards()
| {
"content_hash": "52d2e97181fb069f1be9960d6f3affc0",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 199,
"avg_line_length": 25.313513513513513,
"alnum_prop": 0.618193465727098,
"repo_name": "MinnPost/salesforce-stripe",
"id": "55243ca057ad381738d29afb149c7f6878990f46",
"size": "4683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "134536"
},
{
"name": "Dockerfile",
"bytes": "376"
},
{
"name": "HTML",
"bytes": "117423"
},
{
"name": "JavaScript",
"bytes": "166049"
},
{
"name": "Makefile",
"bytes": "1262"
},
{
"name": "Python",
"bytes": "238140"
}
],
"symlink_target": ""
} |
import logging
log = logging.getLogger(__name__)
import os
import uuid
from flask import (
render_template, request, send_from_directory,
abort, jsonify, Response, redirect, url_for
)
from six import string_types
from bokeh import protocol
from bokeh.exceptions import DataIntegrityException
from bokeh.resources import Resources
from bokeh.templates import AUTOLOAD
from .bbauth import check_read_authentication_and_create_client
from ..app import bokeh_app
from ..crossdomain import crossdomain
from ..models import convenience as mconv
from ..models import docs
from ..models import user
from ..serverbb import prune
from ..views import make_json
def request_resources():
"""Creates resources instance based on url info from
current app/request context
"""
if bokeh_app.url_prefix:
        # strip off the leading slash
root_url = request.url_root + bokeh_app.url_prefix[1:]
else:
root_url = request.url_root
resources = Resources(root_url=root_url, mode='server')
return resources
def render(fname, **kwargs):
resources = request_resources()
bokeh_prefix = resources.root_url
return render_template(fname, bokeh_prefix=bokeh_prefix,
**kwargs)
@bokeh_app.route('/bokeh/ping')
def ping():
''' Test whether Bokeh server is up.
:status 200:
'''
# test route, to know if the server is up
return "pong"
@bokeh_app.route('/bokeh/')
def index(*unused_all, **kwargs):
''' Render main page.
:status 200: if current user logged in
:status 302: otherwise redirect to login
'''
bokehuser = bokeh_app.current_user()
if not bokehuser:
return redirect(url_for('.login_get'))
return render('bokeh.html',
splitjs=bokeh_app.splitjs,
username=bokehuser.username,
title="Bokeh Documents for %s" % bokehuser.username
)
@bokeh_app.route('/')
def welcome(*unused_all, **kwargs):
''' Redirect to index
:status 302: redirect to index
'''
return redirect(url_for('.index'))
@bokeh_app.route('/bokeh/favicon.ico')
def favicon():
''' Return favicon.
:status 200: return favicon
'''
return send_from_directory(os.path.join(bokeh_app.root_path, 'static'),
'favicon.ico', mimetype='image/x-icon')
def _makedoc(redisconn, u, title):
docid = str(uuid.uuid4())
if isinstance(u, string_types):
u = user.User.load(redisconn, u)
clientdoc = bokeh_app.backbone_storage.get_document(docid)
prune(clientdoc)
u.add_doc(docid, title)
doc = docs.new_doc(bokeh_app, docid,
title, clientdoc,
rw_users=[u.username])
u.save(redisconn)
bokeh_app.backbone_storage.store_document(clientdoc)
return doc
@bokeh_app.route('/bokeh/doc', methods=['POST'])
@bokeh_app.route('/bokeh/doc/', methods=['POST'])
def makedoc():
if request.json:
title = request.json['title']
else:
title = request.values['title']
bokehuser = bokeh_app.current_user()
try:
_makedoc(bokeh_app.servermodel_storage, bokehuser, title)
except DataIntegrityException as e:
return abort(409, e.message)
jsonstring = protocol.serialize_web(bokehuser.to_public_json())
msg = protocol.serialize_web({'msgtype' : 'docchange'})
bokeh_app.publisher.send("bokehuser:" + bokehuser.username, msg)
return make_json(jsonstring)
@bokeh_app.route('/bokeh/doc/<docid>', methods=['delete'])
@bokeh_app.route('/bokeh/doc/<docid>/', methods=['delete'])
def deletedoc(docid):
bokehuser = bokeh_app.current_user()
try:
bokehuser.remove_doc(docid)
bokehuser.save(bokeh_app.servermodel_storage)
except DataIntegrityException as e:
return abort(409, e.message)
jsonstring = protocol.serialize_web(bokehuser.to_public_json())
msg = protocol.serialize_web({'msgtype' : 'docchange'})
bokeh_app.publisher.send("bokehuser:" + bokehuser.username, msg)
return make_json(jsonstring)
@bokeh_app.route('/bokeh/getdocapikey/<docid>')
def get_doc_api_key(docid):
doc = docs.Doc.load(bokeh_app.servermodel_storage, docid)
if mconv.can_write_from_request(doc, request, bokeh_app):
return jsonify({'apikey' : doc.apikey})
    elif mconv.can_read_from_request(doc, request, bokeh_app):
return jsonify({'readonlyapikey' : doc.readonlyapikey})
else:
return abort(401)
@bokeh_app.route('/bokeh/userinfo/', methods=['GET', 'OPTIONS'])
@crossdomain(origin="*", headers=['BOKEH-API-KEY', 'Continuum-Clientid'])
def get_user():
bokehuser = bokeh_app.current_user()
if not bokehuser:
abort(403)
content = protocol.serialize_web(bokehuser.to_public_json())
return make_json(content)
def _make_test_plot_file(username, userapikey, url):
lines = ["from bokeh import mpl",
"p = mpl.PlotClient(username='%s', serverloc='%s', userapikey='%s')" % (username, url, userapikey)]
return "\n".join(lines)
@bokeh_app.route('/bokeh/doc/<docid>/', methods=['GET', 'OPTIONS'])
@bokeh_app.route('/bokeh/bokehinfo/<docid>/', methods=['GET', 'OPTIONS'])
@crossdomain(origin="*", headers=['BOKEH-API-KEY', 'Continuum-Clientid'])
@check_read_authentication_and_create_client
def get_bokeh_info(docid):
return _get_bokeh_info(docid)
def _get_bokeh_info(docid):
doc = docs.Doc.load(bokeh_app.servermodel_storage, docid)
clientdoc = bokeh_app.backbone_storage.get_document(docid)
prune(clientdoc)
all_models = clientdoc._models.values()
log.info("num models: %s", len(all_models))
all_models = clientdoc.dump(*all_models)
returnval = {'plot_context_ref' : doc.plot_context_ref,
'docid' : docid,
'all_models' : all_models,
'apikey' : doc.apikey}
returnval = protocol.serialize_json(returnval)
    # NOTE: setting the CORS header here may be redundant, since the
    # crossdomain-decorated callers already add it
result = make_json(returnval,
headers={"Access-Control-Allow-Origin": "*"})
return result
@bokeh_app.route('/bokeh/doc/<title>/show', methods=['GET', 'OPTIONS'])
@crossdomain(origin="*", headers=['BOKEH-API-KEY', 'Continuum-Clientid'])
def show_doc_by_title(title):
bokehuser = bokeh_app.current_user()
    matching = [doc for doc in bokehuser.docs if doc['title'] == title]
    if not matching:
        abort(404)
    docid = matching[0]['docid']
return render('show.html', title=title, docid=docid, splitjs=bokeh_app.splitjs)
@bokeh_app.route('/bokeh/doc/', methods=['GET', 'OPTIONS'])
@crossdomain(origin="*", headers=['BOKEH-API-KEY', 'Continuum-Clientid'])
def doc_by_title():
if request.json:
title = request.json['title']
else:
title = request.values['title']
bokehuser = bokeh_app.current_user()
    matching = [doc for doc in bokehuser.docs if doc['title'] == title]
    if len(matching) == 0:
        try:
            doc = _makedoc(bokeh_app.servermodel_storage, bokehuser, title)
            docid = doc.docid
        except DataIntegrityException as e:
            return abort(409, e.message)
        msg = protocol.serialize_web({'msgtype' : 'docchange'})
        bokeh_app.publisher.send("bokehuser:" + bokehuser.username, msg)
    else:
        docid = matching[0]['docid']
return get_bokeh_info(docid)
# need to rethink public publishing
# @bokeh_app.route('/bokeh/publicbokehinfo/<docid>')
# def get_public_bokeh_info(docid):
# doc = docs.Doc.load(bokeh_app.servermodel_storage, docid)
# plot_context_ref = doc.plot_context_ref
# all_models = docs.prune_and_get_valid_models(bokeh_app.servermodel_storage,
# bokeh_app.collections,
# docid)
# public_models = [x for x in all_models if x.get('public', False)]
# if len(public_models) == 0:
# return False
# all_models_json = [x.to_broadcast_json() for x in all_models]
# returnval = {'plot_context_ref' : plot_context_ref,
# 'docid' : docid,
# 'all_models' : all_models_json,
# }
# returnval = protocol.serialize_web(returnval)
# #return returnval
# return (returnval, "200",
# {"Access-Control-Allow-Origin": "*"})
@bokeh_app.route('/bokeh/sampleerror')
def sampleerror():
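    # deliberately broken endpoint: 1 + "sdf" raises TypeError, used to
    # exercise the server's error handling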
return 1 + "sdf"
def make_test_plot():
import numpy as np
from bokeh.plotting import output_server, line
N = 8000
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
output_server("line.py example")
    plot = line(
        x, y, color="#0000FF",
        plot_height=300, plot_width=300,
        tools="pan,resize")
    return plot
#show()
@bokeh_app.route("/bokeh/autoload.js/<elementid>")
def autoload_js(elementid):
''' Return autoload script for given elementid
:param elementid: DOM element ID to target
:status 200: return script
'''
resources = request_resources()
rendered = AUTOLOAD.render(
js_url = resources.js_files[0],
css_files = resources.css_files,
elementid = elementid,
)
return Response(rendered, 200,
{'Content-Type':'application/javascript'})
@bokeh_app.route('/bokeh/objinfo/<docid>/<objid>', methods=['GET', 'OPTIONS'])
@crossdomain(origin="*", headers=['BOKEH-API-KEY', 'Continuum-Clientid'])
@check_read_authentication_and_create_client
def get_bokeh_info_one_object(docid, objid):
doc = docs.Doc.load(bokeh_app.servermodel_storage, docid)
clientdoc = bokeh_app.backbone_storage.get_document(docid)
obj = clientdoc._models[objid]
objs = obj.references()
all_models = clientdoc.dump(*objs)
returnval = {'plot_context_ref' : doc.plot_context_ref,
'docid' : docid,
'all_models' : all_models,
'apikey' : doc.apikey,
'type' : obj.__view_model__
}
returnval = protocol.serialize_json(returnval)
result = make_json(returnval,
headers={"Access-Control-Allow-Origin": "*"})
return result
@bokeh_app.route('/bokeh/doc/<docid>/<objid>', methods=['GET'])
def show_obj(docid, objid):
bokehuser = bokeh_app.current_user()
if not bokehuser:
return redirect(url_for(".login_get", next=request.url))
resources = request_resources()
return render("oneobj.html",
elementid=str(uuid.uuid4()),
docid=docid,
objid=objid,
hide_navbar=True,
splitjs=bokeh_app.splitjs,
username=bokehuser.username,
loglevel=resources.log_level)
@bokeh_app.route('/bokeh/wsurl/', methods=['GET'])
@crossdomain(origin="*", headers=['BOKEH-API-KEY', 'Continuum-Clientid'])
def wsurl():
    ws_conn_string = bokeh_app.websocket_params.get('ws_conn_string')
    if ws_conn_string:
        return ws_conn_string
else:
prefix = bokeh_app.url_prefix
if prefix is None or prefix == "/":
prefix = ""
ws_port = bokeh_app.websocket_params['ws_port']
host = request.host.split(":")[0]
        # TODO: emit wss:// instead when the server is behind SSL/TLS
return "ws://%s:%d%s/bokeh/sub/" % (host, ws_port, prefix)
| {
"content_hash": "39fa9ac6740eb03000903bc323f6335a",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 112,
"avg_line_length": 34.579754601226995,
"alnum_prop": 0.6280493213873858,
"repo_name": "jakevdp/bokeh",
"id": "18a08ade4975480dc003391d250d1dc57371ec70",
"size": "11274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/server/views/main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import datetime
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.extend([
os.path.abspath("../.."),
])
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = "1.0"
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named "sphinx.ext.*") or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
]
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
#source_encoding = "utf-8-sig"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"Rally"
copyright = u"%d, OpenStack Foundation" % datetime.datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.0.3"
# The full version, including alpha/beta/rc tags.
release = "0.0.3"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ""
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = "%B %d, %Y"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["feature_request/README.rst", "samples/README.rst",
"**/README.rst"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, "()" will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not "", a "Last updated on:" timestamp is inserted at every page bottom,
# using the given strftime format.
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ""
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "%sdoc" % project
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ("letterpaper" or "a4paper").
#"papersize": "letterpaper",
# The font size ("10pt", "11pt" or "12pt").
#"pointsize": "10pt",
# Additional stuff for the LaTeX preamble.
#"preamble": "",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index",
"%s.tex" % project,
u"%s Documentation" % project,
u"OpenStack Foundation", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
#man_pages = [
# ("index", "rally", u"Rally Documentation",
# [u"Rally Team"], 1)
#]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
("index", "Rally", u"Rally Documentation",
u"Rally Team", "Rally", "One line description of project.",
"Miscellaneous"),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: "footnote", "no", or "inline".
#texinfo_show_urls = "footnote"
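# Example (hedged; paths follow this repo's doc/source layout): build the HTML
# docs with
#   sphinx-build -b html doc/source doc/build/html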
| {
"content_hash": "7f378d7c13f0e7169f7fca86451b1296",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 80,
"avg_line_length": 31.443089430894307,
"alnum_prop": 0.6947640594699418,
"repo_name": "pandeyop/rally",
"id": "d92550e705f3b9cb28aab1e290e4829cebacb22e",
"size": "8151",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "46741"
},
{
"name": "Python",
"bytes": "2053831"
},
{
"name": "Shell",
"bytes": "18078"
}
],
"symlink_target": ""
} |
import json
import os
import tempfile
from contextlib import contextmanager
from typing import Any, Callable, Dict, TypeVar

# Assumed dependency: the request helpers below rely on Flask's request proxy.
from flask import request
from logbook import Logger

modules = set()
T = TypeVar("T")
def get_logger(name: str) -> Logger:
"""
Returns a logging provider
:param name: name for logger
:return: logger
"""
return Logger(name)
log = get_logger("util")
def location(directory: str) -> str:
"""
:param directory: Directory in usual unix convention
    :return: OS-specific path
"""
return os.path.join(*directory.split("/"))
def current(*directory: str) -> str:
    """
    Locator service
    :param directory: path segments to look for
    :return: absolute path formed from the current working directory
    """
    return os.path.join(os.getcwd(), *directory)
def class_factory(name: str, base: T, **kwargs: Dict[str, Any]) -> object:
    """
    Dynamic class generator
    :param name: class name
    :param base: parent class
    :param kwargs: optional attributes for the new class
    :return: dynamically created class object
    """
def __init__(self, **options):
for key, val in options.items():
setattr(self, key, val)
self.__name__ = endpoint_name(name)
base.__init__(self)
struct = {"__init__": __init__}
struct.update(kwargs)
ctr = type(name, (base,), struct)
return ctr
def endpoint_name(name: str) -> str:
"""
    Converts string from CamelCase to under_score_case
    :param name: regular name
    :return: under_score_case name
"""
LState = class_factory("LState", object)
UState = class_factory("UState", object)
state = UState
words = []
cur = []
for l in name:
if state == UState and l.isupper():
cur.append(l.lower())
elif state == UState and l.islower():
state = LState
cur.append(l)
elif state == LState and l.isupper():
words.append("".join(cur))
cur = [l.lower()]
state = UState
else:
cur.append(l)
words.append("".join(cur))
return "_".join(words)
def camel_case_name(name: str) -> str:
"""
Converts string to CamelCase
:param name: input string
:return: CamelCased string
"""
return "".join([words[0].upper() + words[1:] for words in name.split("_")])
def load_js(file: str, locator: Callable=current) -> str:
"""
Loads JavaScript into memory
:param file: javascript file
:param locator: function which tells where to look for it
:return: javascript as a string
"""
with open(locator(file)) as f:
raw = f.read()
return raw
def get_request_args() -> tuple:
"""
Returns arguments passed to request
    :return: query-string arguments of the current request
"""
return request.args
def get_request_json() -> dict:
    """
    Returns json arguments passed to request
    :return: the request's JSON payload, or a dict parsed from the raw body,
             or an empty dict when neither is available
    """
    payload = request.get_json(silent=True)
    if payload:
        return payload
    try:
        return json.loads(request.data.decode("utf-8"))
    except ValueError:
        return {}
def is_stub(method: Callable) -> bool:
"""
Checks if method is stub
    :param method: callable to inspect
    :return: True when the callable is marked with __stub__
"""
return hasattr(method, "__stub__")
@contextmanager
def temp_file():
"""
Creates a temp file and deletes it afterwards
    :return: the open NamedTemporaryFile object
"""
temp = tempfile.NamedTemporaryFile(delete=False)
try:
yield temp
finally:
temp.close()
os.unlink(temp.name)
@contextmanager
def timer(title: str) -> None:
"""
Measures time elapsed in current block
:param title: Name of block to be visible in output
    :return: nothing; logs the elapsed time on exit
"""
from time import perf_counter as pc
start = pc()
try:
yield
finally:
timediff = (pc() - start) * 1000
log.debug("It took {0} ms to execute block '{1}'".format(timediff, title))
| {
"content_hash": "04a5cf1f6c385939a4018884818a254f",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 90,
"avg_line_length": 22.436046511627907,
"alnum_prop": 0.5871987561544442,
"repo_name": "zaibacu/wutu-compiler",
"id": "0f3c1acd3b63037f44d349e3d87a6d8cca351d37",
"size": "3859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wutu_compiler/utils/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1316"
},
{
"name": "Python",
"bytes": "27389"
}
],
"symlink_target": ""
} |
import os
DEBUG = False
ALLOWED_HOSTS = ['*']
LANGUAGE_CODE = 'en-us'
ROOT_URLCONF = 'urls'
SECRET_KEY = '12345abcd'
SITE_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
SITE_ID = 1
TIME_ZONE = 'UTC'
USE_I18N = True
UMANAGE_BASE_TEMPLATE = 'base_umanage.html'
UMANAGE_BASE_UNAUTHENTICATED_TEMPLATE = 'base_umanage_unauthenticated.html'
UMANAGE_FROM_EMAIL = 'noreply@example.com'
UMANAGE_SITE_ROOT_URI = 'http://somedomain.com'
UMANAGE_SITE_NAME = 'My Site Name'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.humanize',
'django_core',
'umanage',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'umanage.context_processors.common',
)
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'tests/templates'),
os.path.join(SITE_ROOT, 'umanage/templates'),
)
here = lambda *x: os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': here('test_db.db')
}
}
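# Example (hedged; exact invocation depends on the project's test runner):
#   DJANGO_SETTINGS_MODULE=tests.settings django-admin test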
| {
"content_hash": "66c00681dc1963999132518a212ba1b2",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 78,
"avg_line_length": 25.754716981132077,
"alnum_prop": 0.6857142857142857,
"repo_name": "InfoAgeTech/django-umanage",
"id": "aab2021f97b510ea58bbe8a046e6ccf0fb5791a9",
"size": "1365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14186"
},
{
"name": "Python",
"bytes": "55734"
}
],
"symlink_target": ""
} |
from django.template import Context, Library
register = Library()
@register.simple_tag(takes_context=True)
def section(context, section_name):
try:
data = context['infos'][section_name].items()
except KeyError as e:
accepted = ",".join(context['infos'].keys())
raise KeyError("Section `{0}` not found in ({1})".format(e, accepted))
name = section_name.lower()
ctx = {
'name': name,
'section': data
}
filename = 'admin/sysinfo/%s_section.html' % name
t = context.template.engine.get_template(filename)
return t.render(Context(ctx))
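# Example (hedged; "host" is a hypothetical section key from the infos dict):
#   {% load sysinfo %}
#   {% section "host" %}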
| {
"content_hash": "ed8424edb59c8f4dfa7c2ceac690bd0e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 78,
"avg_line_length": 27.636363636363637,
"alnum_prop": 0.6299342105263158,
"repo_name": "saxix/django-sysinfo",
"id": "06e9725caf1281b8fbaa973e702ad0321fcb9378",
"size": "608",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/django_sysinfo/templatetags/sysinfo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "5702"
},
{
"name": "Makefile",
"bytes": "1615"
},
{
"name": "Python",
"bytes": "50580"
},
{
"name": "SCSS",
"bytes": "346"
}
],
"symlink_target": ""
} |
from string import Template
from datetime import date

bitcoinDir = "./"
inFile = bitcoinDir + "/share/qt/Info.plist"
outFile = "Torrentcoin-Qt.app/Contents/Info.plist"

# Grab the version number from the qmake project file.
version = "unknown"
fileForGrabbingVersion = bitcoinDir + "bitcoin-qt.pro"
with open(fileForGrabbingVersion) as proFile:
    for line in proFile:
        lineArr = line.replace(" ", "").split("=")
        if lineArr[0].startswith("VERSION"):
            version = lineArr[1].replace("\n", "")

# Fill in the version and copyright year in the Info.plist template.
with open(inFile, "r") as fIn:
    fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version, YEAR=date.today().year)
with open(outFile, "w") as fOut:
    fOut.write(newFileContent)
print("Info.plist freshly created")
| {
"content_hash": "c04a32ff075d5309c42e6f708d985cc3",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 27.375,
"alnum_prop": 0.710806697108067,
"repo_name": "torrentcointeam/torrentcoin",
"id": "d8fa40e5b23e3f22c2dc73fa6565d88ac8b62206",
"size": "903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "share/qt/clean_mac_info_plist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3058087"
},
{
"name": "C++",
"bytes": "2562573"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "IDL",
"bytes": "14451"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "37276"
},
{
"name": "Shell",
"bytes": "2527"
},
{
"name": "TypeScript",
"bytes": "115643"
}
],
"symlink_target": ""
} |
"""Base class for sparse matrices"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .sputils import (isdense, isscalarlike, isintlike,
get_sum_dtype, validateaxis, check_reshape_kwargs,
check_shape, asmatrix)
__all__ = ['spmatrix', 'isspmatrix', 'issparse',
'SparseWarning', 'SparseEfficiencyWarning']
class SparseWarning(Warning):
pass
class SparseFormatWarning(SparseWarning):
pass
class SparseEfficiencyWarning(SparseWarning):
pass
# The formats that we might potentially understand.
_formats = {'csc': [0, "Compressed Sparse Column"],
'csr': [1, "Compressed Sparse Row"],
'dok': [2, "Dictionary Of Keys"],
'lil': [3, "List of Lists"],
'dod': [4, "Dictionary of Dictionaries"],
'sss': [5, "Symmetric Sparse Skyline"],
'coo': [6, "COOrdinate"],
'lba': [7, "Linpack BAnded"],
'egd': [8, "Ellpack-itpack Generalized Diagonal"],
'dia': [9, "DIAgonal"],
'bsr': [10, "Block Sparse Row"],
'msr': [11, "Modified compressed Sparse Row"],
'bsc': [12, "Block Sparse Column"],
'msc': [13, "Modified compressed Sparse Column"],
'ssk': [14, "Symmetric SKyline"],
'nsk': [15, "Nonsymmetric SKyline"],
'jad': [16, "JAgged Diagonal"],
'uss': [17, "Unsymmetric Sparse Skyline"],
'vbr': [18, "Variable Block Row"],
'und': [19, "Undefined"]
}
# These univariate ufuncs preserve zeros.
_ufuncs_with_fixed_point_at_zero = frozenset([
np.sin, np.tan, np.arcsin, np.arctan, np.sinh, np.tanh, np.arcsinh,
np.arctanh, np.rint, np.sign, np.expm1, np.log1p, np.deg2rad,
np.rad2deg, np.floor, np.ceil, np.trunc, np.sqrt])
MAXPRINT = 50
class spmatrix(object):
""" This class provides a base class for all sparse matrices. It
cannot be instantiated. Most of the work is provided by subclasses.
"""
__array_priority__ = 10.1
ndim = 2
def __init__(self, maxprint=MAXPRINT):
self._shape = None
if self.__class__.__name__ == 'spmatrix':
raise ValueError("This class is not intended"
" to be instantiated directly.")
self.maxprint = maxprint
def set_shape(self, shape):
"""See `reshape`."""
# Make sure copy is False since this is in place
# Make sure format is unchanged because we are doing a __dict__ swap
new_matrix = self.reshape(shape, copy=False).asformat(self.format)
self.__dict__ = new_matrix.__dict__
def get_shape(self):
"""Get shape of a matrix."""
return self._shape
shape = property(fget=get_shape, fset=set_shape)
def reshape(self, *args, **kwargs):
"""reshape(self, shape, order='C', copy=False)
Gives a new shape to a sparse matrix without changing its data.
Parameters
----------
shape : length-2 tuple of ints
The new shape should be compatible with the original shape.
order : {'C', 'F'}, optional
Read the elements using this index order. 'C' means to read and
write the elements using C-like index order; e.g., read entire first
row, then second row, etc. 'F' means to read and write the elements
using Fortran-like index order; e.g., read entire first column, then
second column, etc.
copy : bool, optional
Indicates whether or not attributes of self should be copied
whenever possible. The degree to which attributes are copied varies
depending on the type of sparse matrix being used.
Returns
-------
reshaped_matrix : sparse matrix
A sparse matrix with the given `shape`, not necessarily of the same
format as the current object.
See Also
--------
numpy.matrix.reshape : NumPy's implementation of 'reshape' for
matrices
"""
# If the shape already matches, don't bother doing an actual reshape
# Otherwise, the default is to convert to COO and use its reshape
shape = check_shape(args, self.shape)
order, copy = check_reshape_kwargs(kwargs)
if shape == self.shape:
if copy:
return self.copy()
else:
return self
return self.tocoo(copy=copy).reshape(shape, order=order, copy=False)
def resize(self, shape):
"""Resize the matrix in-place to dimensions given by ``shape``
Any elements that lie within the new shape will remain at the same
indices, while non-zero elements lying outside the new shape are
removed.
Parameters
----------
shape : (int, int)
number of rows and columns in the new matrix
Notes
-----
The semantics are not identical to `numpy.ndarray.resize` or
`numpy.resize`. Here, the same data will be maintained at each index
before and after reshape, if that index is within the new bounds. In
numpy, resizing maintains contiguity of the array, moving elements
around in the logical matrix but not within a flattened representation.
We give no guarantees about whether the underlying data attributes
(arrays, etc.) will be modified in place or replaced with new objects.
"""
# As an inplace operation, this requires implementation in each format.
raise NotImplementedError(
'{}.resize is not implemented'.format(type(self).__name__))
def astype(self, dtype, casting='unsafe', copy=True):
"""Cast the matrix elements to a specified type.
Parameters
----------
dtype : string or numpy dtype
Typecode or data-type to which to cast the data.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
Defaults to 'unsafe' for backwards compatibility.
'no' means the data types should not be cast at all.
'equiv' means only byte-order changes are allowed.
'safe' means only casts which can preserve values are allowed.
'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
'unsafe' means any data conversions may be done.
copy : bool, optional
If `copy` is `False`, the result might share some memory with this
matrix. If `copy` is `True`, it is guaranteed that the result and
this matrix do not share any memory.
"""
dtype = np.dtype(dtype)
if self.dtype != dtype:
return self.tocsr().astype(
dtype, casting=casting, copy=copy).asformat(self.format)
elif copy:
return self.copy()
else:
return self
def asfptype(self):
"""Upcast matrix to a floating point format (if necessary)"""
fp_types = ['f', 'd', 'F', 'D']
if self.dtype.char in fp_types:
return self
else:
for fp_type in fp_types:
if self.dtype <= np.dtype(fp_type):
return self.astype(fp_type)
raise TypeError('cannot upcast [%s] to a floating '
'point format' % self.dtype.name)
def __iter__(self):
for r in range(self.shape[0]):
yield self[r, :]
def getmaxprint(self):
"""Maximum number of elements to display when printed."""
return self.maxprint
def count_nonzero(self):
"""Number of non-zero entries, equivalent to
np.count_nonzero(a.toarray())
Unlike getnnz() and the nnz property, which return the number of stored
entries (the length of the data attribute), this method counts the
actual number of non-zero entries in data.
"""
raise NotImplementedError("count_nonzero not implemented for %s." %
self.__class__.__name__)
def getnnz(self, axis=None):
"""Number of stored values, including explicit zeros.
Parameters
----------
axis : None, 0, or 1
Select between the number of values across the whole matrix, in
each column, or in each row.
See also
--------
count_nonzero : Number of non-zero entries
"""
raise NotImplementedError("getnnz not implemented for %s." %
self.__class__.__name__)
@property
def nnz(self):
"""Number of stored values, including explicit zeros.
See also
--------
count_nonzero : Number of non-zero entries
"""
return self.getnnz()
def getformat(self):
"""Format of a matrix representation as a string."""
return getattr(self, 'format', 'und')
def __repr__(self):
_, format_name = _formats[self.getformat()]
return "<%dx%d sparse matrix of type '%s'\n" \
"\twith %d stored elements in %s format>" % \
(self.shape + (self.dtype.type, self.nnz, format_name))
def __str__(self):
maxprint = self.getmaxprint()
A = self.tocoo()
# helper function, outputs "(i,j) v"
def tostr(row, col, data):
triples = zip(list(zip(row, col)), data)
return '\n'.join([(' %s\t%s' % t) for t in triples])
if self.nnz > maxprint:
half = maxprint // 2
out = tostr(A.row[:half], A.col[:half], A.data[:half])
out += "\n :\t:\n"
half = maxprint - maxprint//2
out += tostr(A.row[-half:], A.col[-half:], A.data[-half:])
else:
out = tostr(A.row, A.col, A.data)
return out
def __bool__(self): # Simple -- other ideas?
if self.shape == (1, 1):
return self.nnz != 0
else:
raise ValueError("The truth value of an array with more than one "
"element is ambiguous. Use a.any() or a.all().")
__nonzero__ = __bool__
# What should len(sparse) return? For consistency with dense matrices,
# perhaps it should be the number of rows? But for some uses the number of
# non-zeros is more important. For now, raise an exception!
def __len__(self):
raise TypeError("sparse matrix length is ambiguous; use getnnz()"
" or shape[0]")
def asformat(self, format, copy=False):
"""Return this matrix in the passed format.
Parameters
----------
format : {str, None}
The desired matrix format ("csr", "csc", "lil", "dok", "array", ...)
or None for no conversion.
copy : bool, optional
If True, the result is guaranteed to not share data with self.
Returns
-------
A : This matrix in the passed format.
"""
if format is None or format == self.format:
if copy:
return self.copy()
else:
return self
else:
try:
convert_method = getattr(self, 'to' + format)
except AttributeError:
raise ValueError('Format {} is unknown.'.format(format))
# Forward the copy kwarg, if it's accepted.
try:
return convert_method(copy=copy)
except TypeError:
return convert_method()
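    # Example (hedged): A.asformat("coo") dispatches to A.tocoo(); an unknown
    # format name raises ValueError via the getattr lookup above.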
###################################################################
# NOTE: All arithmetic operations use csr_matrix by default.
# Therefore a new sparse matrix format just needs to define a
# .tocsr() method to provide arithmetic support. Any of these
# methods can be overridden for efficiency.
####################################################################
def multiply(self, other):
"""Point-wise multiplication by another matrix
"""
return self.tocsr().multiply(other)
def maximum(self, other):
"""Element-wise maximum between this and another matrix."""
return self.tocsr().maximum(other)
def minimum(self, other):
"""Element-wise minimum between this and another matrix."""
return self.tocsr().minimum(other)
def dot(self, other):
"""Ordinary dot product
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> v = np.array([1, 0, -1])
>>> A.dot(v)
array([ 1, -3, -1], dtype=int64)
"""
return self * other
def power(self, n, dtype=None):
"""Element-wise power."""
return self.tocsr().power(n, dtype=dtype)
def __eq__(self, other):
return self.tocsr().__eq__(other)
def __ne__(self, other):
return self.tocsr().__ne__(other)
def __lt__(self, other):
return self.tocsr().__lt__(other)
def __gt__(self, other):
return self.tocsr().__gt__(other)
def __le__(self, other):
return self.tocsr().__le__(other)
def __ge__(self, other):
return self.tocsr().__ge__(other)
def __abs__(self):
return abs(self.tocsr())
def __round__(self, ndigits=0):
return round(self.tocsr(), ndigits=ndigits)
def _add_sparse(self, other):
return self.tocsr()._add_sparse(other)
def _add_dense(self, other):
return self.tocoo()._add_dense(other)
def _sub_sparse(self, other):
return self.tocsr()._sub_sparse(other)
def _sub_dense(self, other):
return self.todense() - other
def _rsub_dense(self, other):
# note: this can't be replaced by other + (-self) for unsigned types
return other - self.todense()
def __add__(self, other): # self + other
if isscalarlike(other):
if other == 0:
return self.copy()
# Now we would add this scalar to every element.
raise NotImplementedError('adding a nonzero scalar to a '
'sparse matrix is not supported')
elif isspmatrix(other):
if other.shape != self.shape:
raise ValueError("inconsistent shapes")
return self._add_sparse(other)
elif isdense(other):
other = np.broadcast_to(other, self.shape)
return self._add_dense(other)
else:
return NotImplemented
def __radd__(self,other): # other + self
return self.__add__(other)
def __sub__(self, other): # self - other
if isscalarlike(other):
if other == 0:
return self.copy()
raise NotImplementedError('subtracting a nonzero scalar from a '
'sparse matrix is not supported')
elif isspmatrix(other):
if other.shape != self.shape:
raise ValueError("inconsistent shapes")
return self._sub_sparse(other)
elif isdense(other):
other = np.broadcast_to(other, self.shape)
return self._sub_dense(other)
else:
return NotImplemented
def __rsub__(self,other): # other - self
if isscalarlike(other):
if other == 0:
return -self.copy()
raise NotImplementedError('subtracting a sparse matrix from a '
'nonzero scalar is not supported')
elif isdense(other):
other = np.broadcast_to(other, self.shape)
return self._rsub_dense(other)
else:
return NotImplemented
def __mul__(self, other):
"""interpret other and call one of the following
self._mul_scalar()
self._mul_vector()
self._mul_multivector()
self._mul_sparse_matrix()
"""
M, N = self.shape
if other.__class__ is np.ndarray:
# Fast path for the most common case
if other.shape == (N,):
return self._mul_vector(other)
elif other.shape == (N, 1):
return self._mul_vector(other.ravel()).reshape(M, 1)
elif other.ndim == 2 and other.shape[0] == N:
return self._mul_multivector(other)
if isscalarlike(other):
# scalar value
return self._mul_scalar(other)
if issparse(other):
if self.shape[1] != other.shape[0]:
raise ValueError('dimension mismatch')
return self._mul_sparse_matrix(other)
# If it's a list or whatever, treat it like a matrix
other_a = np.asanyarray(other)
if other_a.ndim == 0 and other_a.dtype == np.object_:
# Not interpretable as an array; return NotImplemented so that
# other's __rmul__ can kick in if that's implemented.
return NotImplemented
try:
other.shape
except AttributeError:
other = other_a
if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
# dense row or column vector
if other.shape != (N,) and other.shape != (N, 1):
raise ValueError('dimension mismatch')
result = self._mul_vector(np.ravel(other))
if isinstance(other, np.matrix):
result = asmatrix(result)
if other.ndim == 2 and other.shape[1] == 1:
# If 'other' was an (nx1) column vector, reshape the result
result = result.reshape(-1, 1)
return result
elif other.ndim == 2:
##
# dense 2D array or matrix ("multivector")
if other.shape[0] != self.shape[1]:
raise ValueError('dimension mismatch')
result = self._mul_multivector(np.asarray(other))
if isinstance(other, np.matrix):
result = asmatrix(result)
return result
else:
raise ValueError('could not interpret dimensions')
# by default, use CSR for __mul__ handlers
def _mul_scalar(self, other):
return self.tocsr()._mul_scalar(other)
def _mul_vector(self, other):
return self.tocsr()._mul_vector(other)
def _mul_multivector(self, other):
return self.tocsr()._mul_multivector(other)
def _mul_sparse_matrix(self, other):
return self.tocsr()._mul_sparse_matrix(other)
def __rmul__(self, other): # other * self
if isscalarlike(other):
return self.__mul__(other)
else:
# Don't use asarray unless we have to
try:
tr = other.transpose()
except AttributeError:
tr = np.asarray(other).transpose()
return (self.transpose() * tr).transpose()
#####################################
# matmul (@) operator (Python 3.5+) #
#####################################
def __matmul__(self, other):
if isscalarlike(other):
raise ValueError("Scalar operands are not allowed, "
"use '*' instead")
return self.__mul__(other)
def __rmatmul__(self, other):
if isscalarlike(other):
raise ValueError("Scalar operands are not allowed, "
"use '*' instead")
return self.__rmul__(other)
####################
# Other Arithmetic #
####################
def _divide(self, other, true_divide=False, rdivide=False):
if isscalarlike(other):
if rdivide:
if true_divide:
return np.true_divide(other, self.todense())
else:
return np.divide(other, self.todense())
if true_divide and np.can_cast(self.dtype, np.float_):
return self.astype(np.float_)._mul_scalar(1./other)
else:
r = self._mul_scalar(1./other)
scalar_dtype = np.asarray(other).dtype
if (np.issubdtype(self.dtype, np.integer) and
np.issubdtype(scalar_dtype, np.integer)):
return r.astype(self.dtype)
else:
return r
elif isdense(other):
if not rdivide:
if true_divide:
return np.true_divide(self.todense(), other)
else:
return np.divide(self.todense(), other)
else:
if true_divide:
return np.true_divide(other, self.todense())
else:
return np.divide(other, self.todense())
elif isspmatrix(other):
if rdivide:
return other._divide(self, true_divide, rdivide=False)
self_csr = self.tocsr()
if true_divide and np.can_cast(self.dtype, np.float_):
return self_csr.astype(np.float_)._divide_sparse(other)
else:
return self_csr._divide_sparse(other)
else:
return NotImplemented
def __truediv__(self, other):
return self._divide(other, true_divide=True)
def __div__(self, other):
# Always do true division
return self._divide(other, true_divide=True)
def __rtruediv__(self, other):
# Implementing this as the inverse would be too magical -- bail out
return NotImplemented
def __rdiv__(self, other):
# Implementing this as the inverse would be too magical -- bail out
return NotImplemented
def __neg__(self):
return -self.tocsr()
def __iadd__(self, other):
return NotImplemented
def __isub__(self, other):
return NotImplemented
def __imul__(self, other):
return NotImplemented
def __idiv__(self, other):
return self.__itruediv__(other)
def __itruediv__(self, other):
return NotImplemented
def __pow__(self, other):
if self.shape[0] != self.shape[1]:
raise TypeError('matrix is not square')
if isintlike(other):
other = int(other)
if other < 0:
raise ValueError('exponent must be >= 0')
if other == 0:
from .construct import eye
return eye(self.shape[0], dtype=self.dtype)
elif other == 1:
return self.copy()
else:
tmp = self.__pow__(other//2)
if (other % 2):
return self * tmp * tmp
else:
return tmp * tmp
elif isscalarlike(other):
raise ValueError('exponent must be an integer')
else:
return NotImplemented
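    # Example (hedged): for a square sparse matrix A, A ** 0 is the identity,
    # A ** 1 is a copy, and A ** 3 evaluates to A * A * A via repeated squaring.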
def __getattr__(self, attr):
if attr == 'A':
return self.toarray()
elif attr == 'T':
return self.transpose()
elif attr == 'H':
return self.getH()
elif attr == 'real':
return self._real()
elif attr == 'imag':
return self._imag()
elif attr == 'size':
return self.getnnz()
else:
raise AttributeError(attr + " not found")
def transpose(self, axes=None, copy=False):
"""
Reverses the dimensions of the sparse matrix.
Parameters
----------
axes : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except
for the default value.
copy : bool, optional
Indicates whether or not attributes of `self` should be
copied whenever possible. The degree to which attributes
are copied varies depending on the type of sparse matrix
being used.
Returns
-------
p : `self` with the dimensions reversed.
See Also
--------
numpy.matrix.transpose : NumPy's implementation of 'transpose'
for matrices
"""
return self.tocsr(copy=copy).transpose(axes=axes, copy=False)
def conj(self, copy=True):
"""Element-wise complex conjugation.
If the matrix is of non-complex data type and `copy` is False,
this method does nothing and the data is not copied.
Parameters
----------
copy : bool, optional
If True, the result is guaranteed to not share data with self.
Returns
-------
A : The element-wise complex conjugate.
"""
if np.issubdtype(self.dtype, np.complexfloating):
return self.tocsr(copy=copy).conj(copy=False)
elif copy:
return self.copy()
else:
return self
def conjugate(self, copy=True):
return self.conj(copy=copy)
conjugate.__doc__ = conj.__doc__
# Renamed conjtranspose() -> getH() for compatibility with dense matrices
def getH(self):
"""Return the Hermitian transpose of this matrix.
See Also
--------
numpy.matrix.getH : NumPy's implementation of `getH` for matrices
"""
return self.transpose().conj()
def _real(self):
return self.tocsr()._real()
def _imag(self):
return self.tocsr()._imag()
def nonzero(self):
"""nonzero indices
Returns a tuple of arrays (row,col) containing the indices
of the non-zero elements of the matrix.
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1,2,0],[0,0,3],[4,0,5]])
>>> A.nonzero()
(array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))
"""
# convert to COOrdinate format
A = self.tocoo()
nz_mask = A.data != 0
return (A.row[nz_mask], A.col[nz_mask])
def getcol(self, j):
"""Returns a copy of column j of the matrix, as an (m x 1) sparse
matrix (column vector).
"""
# Spmatrix subclasses should override this method for efficiency.
# Post-multiply by a (n x 1) column vector 'a' containing all zeros
# except for a_j = 1
from .csc import csc_matrix
n = self.shape[1]
if j < 0:
j += n
if j < 0 or j >= n:
raise IndexError("index out of bounds")
col_selector = csc_matrix(([1], [[j], [0]]),
shape=(n, 1), dtype=self.dtype)
return self * col_selector
def getrow(self, i):
"""Returns a copy of row i of the matrix, as a (1 x n) sparse
matrix (row vector).
"""
# Spmatrix subclasses should override this method for efficiency.
# Pre-multiply by a (1 x m) row vector 'a' containing all zeros
# except for a_i = 1
from .csr import csr_matrix
m = self.shape[0]
if i < 0:
i += m
if i < 0 or i >= m:
raise IndexError("index out of bounds")
row_selector = csr_matrix(([1], [[0], [i]]),
shape=(1, m), dtype=self.dtype)
return row_selector * self
# def __array__(self):
# return self.toarray()
def todense(self, order=None, out=None):
"""
Return a dense matrix representation of this matrix.
Parameters
----------
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major)
or Fortran (column-major) order in memory. The default
is 'None', indicating the NumPy default of C-ordered.
Cannot be specified in conjunction with the `out`
argument.
out : ndarray, 2-D, optional
If specified, uses this array (or `numpy.matrix`) as the
output buffer instead of allocating a new array to
return. The provided array must have the same shape and
dtype as the sparse matrix on which you are calling the
method.
Returns
-------
arr : numpy.matrix, 2-D
A NumPy matrix object with the same shape and containing
the same data represented by the sparse matrix, with the
requested memory order. If `out` was passed and was an
array (rather than a `numpy.matrix`), it will be filled
with the appropriate values and returned wrapped in a
`numpy.matrix` object that shares the same memory.
"""
return asmatrix(self.toarray(order=order, out=out))
def toarray(self, order=None, out=None):
"""
Return a dense ndarray representation of this matrix.
Parameters
----------
order : {'C', 'F'}, optional
Whether to store multidimensional data in C (row-major)
or Fortran (column-major) order in memory. The default
is 'None', indicating the NumPy default of C-ordered.
Cannot be specified in conjunction with the `out`
argument.
out : ndarray, 2-D, optional
If specified, uses this array as the output buffer
instead of allocating a new array to return. The provided
array must have the same shape and dtype as the sparse
matrix on which you are calling the method. For most
sparse types, `out` is required to be memory contiguous
(either C or Fortran ordered).
Returns
-------
arr : ndarray, 2-D
An array with the same shape and containing the same
data represented by the sparse matrix, with the requested
memory order. If `out` was passed, the same object is
returned after being modified in-place to contain the
appropriate values.
"""
return self.tocoo(copy=False).toarray(order=order, out=out)
# Any sparse matrix format deriving from spmatrix must define one of
# tocsr or tocoo. The other conversion methods may be implemented for
# efficiency, but are not required.
def tocsr(self, copy=False):
"""Convert this matrix to Compressed Sparse Row format.
With copy=False, the data/indices may be shared between this matrix and
the resultant csr_matrix.
"""
return self.tocoo(copy=copy).tocsr(copy=False)
def todok(self, copy=False):
"""Convert this matrix to Dictionary Of Keys format.
With copy=False, the data/indices may be shared between this matrix and
the resultant dok_matrix.
"""
return self.tocoo(copy=copy).todok(copy=False)
def tocoo(self, copy=False):
"""Convert this matrix to COOrdinate format.
With copy=False, the data/indices may be shared between this matrix and
the resultant coo_matrix.
"""
return self.tocsr(copy=False).tocoo(copy=copy)
def tolil(self, copy=False):
"""Convert this matrix to List of Lists format.
With copy=False, the data/indices may be shared between this matrix and
the resultant lil_matrix.
"""
return self.tocsr(copy=False).tolil(copy=copy)
def todia(self, copy=False):
"""Convert this matrix to sparse DIAgonal format.
With copy=False, the data/indices may be shared between this matrix and
the resultant dia_matrix.
"""
return self.tocoo(copy=copy).todia(copy=False)
def tobsr(self, blocksize=None, copy=False):
"""Convert this matrix to Block Sparse Row format.
With copy=False, the data/indices may be shared between this matrix and
the resultant bsr_matrix.
When blocksize=(R, C) is provided, it will be used for construction of
the bsr_matrix.
"""
return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy)
def tocsc(self, copy=False):
"""Convert this matrix to Compressed Sparse Column format.
With copy=False, the data/indices may be shared between this matrix and
the resultant csc_matrix.
"""
return self.tocsr(copy=copy).tocsc(copy=False)
def copy(self):
"""Returns a copy of this matrix.
No data/indices will be shared between the returned value and current
matrix.
"""
return self.__class__(self, copy=True)
def sum(self, axis=None, dtype=None, out=None):
"""
Sum the matrix elements over a given axis.
Parameters
----------
axis : {-2, -1, 0, 1, None} optional
Axis along which the sum is computed. The default is to
compute the sum of all the matrix elements, returning a scalar
(i.e., `axis` = `None`).
dtype : dtype, optional
The type of the returned matrix and of the accumulator in which
the elements are summed. The dtype of `a` is used by default
unless `a` has an integer dtype of less precision than the default
platform integer. In that case, if `a` is signed then the platform
integer is used while if `a` is unsigned then an unsigned integer
of the same precision as the platform integer is used.
.. versionadded:: 0.18.0
out : np.matrix, optional
Alternative output matrix in which to place the result. It must
have the same shape as the expected output, but the type of the
output values will be cast if necessary.
.. versionadded:: 0.18.0
Returns
-------
sum_along_axis : np.matrix
A matrix with the same shape as `self`, with the specified
axis removed.
See Also
--------
numpy.matrix.sum : NumPy's implementation of 'sum' for matrices
"""
validateaxis(axis)
# We use multiplication by a matrix of ones to achieve this.
# For some sparse matrix formats more efficient methods are
# possible -- these should override this function.
m, n = self.shape
# Mimic numpy's casting.
res_dtype = get_sum_dtype(self.dtype)
if axis is None:
# sum over rows and columns
return (self * asmatrix(np.ones(
(n, 1), dtype=res_dtype))).sum(
dtype=dtype, out=out)
if axis < 0:
axis += 2
# axis = 0 or 1 now
if axis == 0:
# sum over columns
ret = asmatrix(np.ones(
(1, m), dtype=res_dtype)) * self
else:
# sum over rows
ret = self * asmatrix(
np.ones((n, 1), dtype=res_dtype))
if out is not None and out.shape != ret.shape:
raise ValueError("dimensions do not match")
return ret.sum(axis=(), dtype=dtype, out=out)
def mean(self, axis=None, dtype=None, out=None):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the matrix elements. The average is taken
over all elements in the matrix by default, otherwise over the
specified axis. `float64` intermediate and return values are used
for integer inputs.
Parameters
----------
axis : {-2, -1, 0, 1, None} optional
Axis along which the mean is computed. The default is to compute
the mean of all elements in the matrix (i.e., `axis` = `None`).
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
.. versionadded:: 0.18.0
out : np.matrix, optional
Alternative output matrix in which to place the result. It must
have the same shape as the expected output, but the type of the
output values will be cast if necessary.
.. versionadded:: 0.18.0
Returns
-------
m : np.matrix
See Also
--------
numpy.matrix.mean : NumPy's implementation of 'mean' for matrices
"""
def _is_integral(dtype):
return (np.issubdtype(dtype, np.integer) or
np.issubdtype(dtype, np.bool_))
validateaxis(axis)
res_dtype = self.dtype.type
integral = _is_integral(self.dtype)
# output dtype
if dtype is None:
if integral:
res_dtype = np.float64
else:
res_dtype = np.dtype(dtype).type
# intermediate dtype for summation
inter_dtype = np.float64 if integral else res_dtype
inter_self = self.astype(inter_dtype)
if axis is None:
return (inter_self / np.array(
self.shape[0] * self.shape[1]))\
.sum(dtype=res_dtype, out=out)
if axis < 0:
axis += 2
# axis = 0 or 1 now
if axis == 0:
return (inter_self * (1.0 / self.shape[0])).sum(
axis=0, dtype=res_dtype, out=out)
else:
return (inter_self * (1.0 / self.shape[1])).sum(
axis=1, dtype=res_dtype, out=out)
def diagonal(self, k=0):
"""Returns the kth diagonal of the matrix.
Parameters
----------
k : int, optional
Which diagonal to get, corresponding to elements a[i, i+k].
Default: 0 (the main diagonal).
.. versionadded:: 1.0
See also
--------
numpy.diagonal : Equivalent numpy function.
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> A.diagonal()
array([1, 0, 5])
>>> A.diagonal(k=1)
array([2, 3])
"""
return self.tocsr().diagonal(k=k)
def setdiag(self, values, k=0):
"""
Set diagonal or off-diagonal elements of the array.
Parameters
----------
values : array_like
New values of the diagonal elements.
Values may have any length. If the diagonal is longer than values,
            then the remaining diagonal entries will not be set. If values is
            longer than the diagonal, then the remaining values are ignored.
If a scalar value is given, all of the diagonal is set to it.
k : int, optional
Which off-diagonal to set, corresponding to elements a[i,i+k].
Default: 0 (the main diagonal).
"""
M, N = self.shape
if (k > 0 and k >= N) or (k < 0 and -k >= M):
raise ValueError("k exceeds matrix dimensions")
self._setdiag(np.asarray(values), k)
def _setdiag(self, values, k):
M, N = self.shape
if k < 0:
if values.ndim == 0:
# broadcast
max_index = min(M+k, N)
for i in range(max_index):
self[i - k, i] = values
else:
max_index = min(M+k, N, len(values))
if max_index <= 0:
return
for i, v in enumerate(values[:max_index]):
self[i - k, i] = v
else:
if values.ndim == 0:
# broadcast
max_index = min(M, N-k)
for i in range(max_index):
self[i, i + k] = values
else:
max_index = min(M, N-k, len(values))
if max_index <= 0:
return
for i, v in enumerate(values[:max_index]):
self[i, i + k] = v
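    # Example (hedged): on a 3x3 matrix, setdiag([7, 8], k=1) assigns
    # a[0, 1] = 7 and a[1, 2] = 8 through the k > 0 branch above.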
def _process_toarray_args(self, order, out):
if out is not None:
if order is not None:
raise ValueError('order cannot be specified if out '
'is not None')
if out.shape != self.shape or out.dtype != self.dtype:
raise ValueError('out array must be same dtype and shape as '
'sparse matrix')
out[...] = 0.
return out
else:
return np.zeros(self.shape, dtype=self.dtype, order=order)
def isspmatrix(x):
"""Is x of a sparse matrix type?
Parameters
----------
x
object to check for being a sparse matrix
Returns
-------
bool
True if x is a sparse matrix, False otherwise
Notes
-----
issparse and isspmatrix are aliases for the same function.
Examples
--------
>>> from scipy.sparse import csr_matrix, isspmatrix
>>> isspmatrix(csr_matrix([[5]]))
True
>>> from scipy.sparse import isspmatrix
>>> isspmatrix(5)
False
"""
return isinstance(x, spmatrix)
issparse = isspmatrix
| {
"content_hash": "656be6e0e6cccee76a8ba391198bd2eb",
"timestamp": "",
"source": "github",
"line_count": 1220,
"max_line_length": 80,
"avg_line_length": 33.84918032786885,
"alnum_prop": 0.545597636574971,
"repo_name": "arokem/scipy",
"id": "4db1522952cb14d0209fbdfddb4a8d64b48b64b1",
"size": "41296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scipy/sparse/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4399737"
},
{
"name": "C++",
"bytes": "649740"
},
{
"name": "Dockerfile",
"bytes": "1291"
},
{
"name": "Fortran",
"bytes": "5368728"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12815696"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
import scrapy
import unicodedata
import re
from locations.items import GeojsonPointItem
regex = r"(^\D)"
regex_time = r"(1[0-2]|0[1-9]|[1-9]):[0-5]\d\s?[AaPp][Mm]"
regex_am = r"\s?([Aa][Mm])"
regex_pm = r"\s?([Pp][Mm])"
class MonicalsSpider(scrapy.Spider):
name = 'monicals'
    allowed_domains = ['www.monicals.com']
start_urls = ['http://www.monicals.com/locations/']
def parse(self, response):
stores = response.xpath('//*[@itemprop="name"]/a/@href').extract()
for store in stores:
yield scrapy.Request(store, callback=self.parse_store)
    def convert_hours(self, hour):
        for i in range(len(hour)):
            if not re.match(regex_time, hour[i]):
                hour[i] = "Closed"
                continue
            if ' – ' not in hour[i]:
                hour[i] = "Special Hours"
                continue
            converted = []
            for ampm in hour[i].split(' – '):
                is_pm = re.search(regex_pm, ampm)
                cleaned = re.sub(regex_pm, '', re.sub(regex_am, '', ampm))
                hour_min = cleaned.split(":")
                if is_pm and int(hour_min[0]) < 12:
                    hour_min[0] = str(12 + int(hour_min[0]))
                hour_min[0] = hour_min[0].zfill(2)
                converted.append(":".join(hour_min))
            # join start and end so "11:00 am – 9:00 pm" becomes "11:00-21:00"
            hour[i] = '-'.join(converted)
        return hour
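    # Example (hedged): convert_hours(["11:00 am – 9:00 pm"]) -> ["11:00-21:00"];
    # strings without a parsable leading time become "Closed".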
    def convert_days(self, days):
        days = [x for x in days if x]
        for i in range(len(days)):
            days[i] = unicodedata.normalize("NFKD", days[i])
            days[i] = days[i].strip()
            if days[i]:
                if re.match(regex, days[i]):
                    if '–' in days[i]:
                        day_from = days[i][:2]
                        try:  # fix for the Chillicothe store listing
                            day_to = days[i].split(' – ')[1][:2]
                            days[i] = day_from + '-' + day_to
                        except IndexError:
                            days[i] = day_from + '-' + "Sa"
                    elif '&' in days[i]:
                        day_from = days[i][:2]
                        day_to = days[i].split(' & ')[1][:2]
                        days[i] = day_from + '-' + day_to
                    else:
                        days[i] = days[i][:2]
                else:
                    days[i] = ''
        days = [x for x in days if x]
        return days
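    # Examples (hedged): convert_days(["Monday – Friday"]) -> ["Mo-Fr"] and
    # convert_days(["Saturday & Sunday"]) -> ["Sa-Su"].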
def parse_store(self, response):
lat = response.xpath('//*[@id="location-lat"]/@value').extract_first()
lon = response.xpath('//*[@id="location-lng"]/@value').extract_first()
name = response.xpath(
'//div[@class="title-wrap"]/h2/text()').extract_first()
phone = response.xpath(
'//div[@class="title-wrap"]/div/text()').extract_first()
street = response.xpath(
'//li[@itemprop="streetAddress"]/text()').extract_first().strip()
city = response.xpath(
'//span[@itemprop="addressLocality"]/text()').extract_first()
state = response.xpath(
'//span[@itemprop="addressRegion"]/text()').extract_first()
postcode = response.xpath(
'//span[@itemprop="postalCode"]/text()').extract_first()
website = response.xpath(
'//*[@id="my_location_url"]/@value').extract_first()
address = "{}{} {} {}".format(street, city, state, postcode)
# Some pages post notices such as "No longer accepting checks"
# in the day/hours open section
hour = response.xpath(
'//*[@class="location-sidebar-item"][2]/descendant::*[contains('
'., "am") or contains(., "pm") or contains('
'., "Closed")]/text()').extract()
day = self.convert_days(response.xpath(
'//*[@class="location-sidebar-item"][2]/descendant::*[contains('
'., "Sunday") or contains(., "Monday") or contains('
'., "Tuesday") or contains(., "Wednesday") or contains('
'., "Thursday") or contains(., "Friday") or contains('
'., "Saturday")]/text()').extract())
for i in range(len(hour)):
hour[i] = unicodedata.normalize("NFKD", hour[i]) # handle \xa0
hour[i] = hour[i].strip()
hour = [x for x in hour if x]
hour = self.convert_hours(hour)
opening_hours = ', '.join('{} : {}'.format(*t) for t in zip(day, hour))
yield GeojsonPointItem(
lat=lat,
lon=lon,
addr_full=address,
street=street,
city=city,
state=state,
postcode=postcode,
phone=phone,
website=website,
opening_hours=opening_hours,
ref=response.url,
)
| {
"content_hash": "d7fcdcf785ab870dc97e1a9113fd5ebb",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 79,
"avg_line_length": 38.652482269503544,
"alnum_prop": 0.44403669724770645,
"repo_name": "iandees/all-the-places",
"id": "8f2235b007e39bd61780356a1e0a73f5482ba296",
"size": "5483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "locations/spiders/Monicals_pizza.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2134"
},
{
"name": "Python",
"bytes": "116132"
},
{
"name": "Shell",
"bytes": "4477"
}
],
"symlink_target": ""
} |
import os
import unittest
import AmConfig.appconf
import AmConfig.domainconfig
class TestConfigReader(unittest.TestCase):
def test_readappconf(self):
conf = AmConfig.appconf.AppConf(self._mainConfig)
self.assertIsNotNone(conf.MonitorInterval)
self.assertEqual(conf.MonitorInterval,20)
self.assertEqual(conf.DbName,'AvailabilityMonitor')
conf.ReadDomainConfig(self._configPath)
self.assertTrue(len(conf.DomainConfs) == 1)
def test_readdomainconf(self):
conf = AmConfig.domainconfig.DomainConf(self._domainConfig)
self.assertIsNotNone(conf.DomainName)
        self.assertEqual(conf.DomainName, 'api.ycapp.yiche.com')
        self.assertEqual(conf.Protocol, 'http')
self.assertTrue(len(conf.SrcIpList) == 2)
self.assertTrue(len(conf.UrlDic) == 1)
def setUp(self):
self._startPath = os.path.split(os.path.realpath(__file__))[0]
self._configPath = os.path.join(self._startPath,"Conf")
self._mainConfig = os.path.join(self._configPath,"am.conf")
self._domainConfig = os.path.join(self._configPath,"DomainConf/webapi.conf")
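    # Hedged note (not in the original tests): the fixtures are assumed to live
    # next to this file, e.g. Conf/am.conf and Conf/DomainConf/webapi.conf.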
| {
"content_hash": "14dbf5995fabac2b4aadbba1754d98fa",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 84,
"avg_line_length": 43.34615384615385,
"alnum_prop": 0.6903283052351376,
"repo_name": "jinhong666/Python",
"id": "f81786ded9d156f2edd8d984f597a62d7e738773",
"size": "1127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AvailabilityMonitor/tests/test_Config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17511"
},
{
"name": "Shell",
"bytes": "1064"
}
],
"symlink_target": ""
} |
'''
Copyright 2012 Upverter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import re
from beanstream import errors, process_transaction, transaction
log = logging.getLogger('beanstream.recurring_billing')
STATUS_DESCRIPTORS = {
'active' : 'A',
'closed' : 'C',
'on hold' : 'O'
}
STATUS_CODES = {
'A' : 'active',
'C' : 'closed',
'O' : 'on hold'
}
class CreateRecurringBillingAccount(process_transaction.Purchase):
""" Creating a recurring billing account is essentially doing a purchase
transaction with some options specifying recurring billing.
"""
def __init__(self, beanstream, amount, frequency_period,
frequency_increment):
""" Create a new recurring billing account creation transaction.
Arguments:
beanstream: gateway object
amount: the amount to charge on a recurring basis
frequency_period: one of DWMY; used in combination with
frequency_increment to set billing frequency
frequency_increment: numeric; used in combination with
frequency_period to set billing frequency
"""
super(CreateRecurringBillingAccount, self).__init__(beanstream, amount)
self.response_class = CreateRecurringBillingAccountResponse
self.params['trnRecurring'] = '1'
frequency_period = frequency_period.upper()
if frequency_period not in 'DWMY':
raise errors.ValidationException('invalid frequency period specified: %s (must be one of DWMY)' % frequency_period)
self.params['rbBillingPeriod'] = frequency_period
self.params['rbBillingIncrement'] = frequency_increment
def set_end_month(self, on):
if self.params['rbBillingPeriod'] != 'M':
log.warning('cannot set end_month attribute if billing period is not monthly')
return
self.params['rbEndMonth'] = '1' if on else '0'
def set_delay_charge(self, on):
self.params['rbCharge'] = '0' if on else '1'
def set_first_date(self, first_date):
self.params['rbFirstBilling'] = first_date.strftime('%m%d%Y')
def set_second_date(self, second_date):
self.params['rbSecondBilling'] = second_date.strftime('%m%d%Y')
def set_expiry(self, expiry):
self.params['rbExpiry'] = expiry.strftime('%m%d%Y')
def set_tax1(self, on):
self.params['rbApplyTax1'] = '1' if on else '0'
def set_tax2(self, on):
self.params['rbApplyTax2'] = '1' if on else '0'
def set_taxes(self, on):
self.set_tax1(on)
self.set_tax2(on)
class CreateRecurringBillingAccountResponse(process_transaction.PurchaseResponse):
def account_id(self):
''' The account id for the recurring billing account. '''
return self.resp.get('rbAccountId', [None])[0]
class ModifyRecurringBillingAccount(transaction.Transaction):
def __init__(self, beanstream, account_id):
super(ModifyRecurringBillingAccount, self).__init__(beanstream)
self.url = self.URLS['recurring_billing']
self.response_class = ModifyRecurringBillingAccountResponse
if not self.beanstream.recurring_billing_passcode:
raise errors.ConfigurationException('recurring billing passcode must be specified to modify recurring billing accounts')
self.params['merchantId'] = self.beanstream.merchant_id
self.params['serviceVersion'] = '1.0'
self.params['operationType'] = 'M'
self.params['passcode'] = self.beanstream.recurring_billing_passcode
self.params['responseFormat'] = 'QS'
self.params['rbAccountId'] = account_id
def parse_raw_response(self, body):
pattern = re.compile(r'^<\?xml version="1\.0".*>\s*<response>\s*<accountId>([^<]+)</accountId>\s*<code>(\d+)</code>\s*<message>(.*)</message>\s*</response>\s*$')
m = pattern.match(body)
if m:
account_id, response_code, message = m.groups()
return {
'accountId': [account_id],
'code': [response_code],
'message': [message]
}
else:
raise errors.ValidationException('unexpected message format received: %s' % body)
def set_amount(self, amount):
self.params['Amount'] = self._process_amount(amount)
def set_billing_state(self, billing_state):
billing_state = billing_state.lower()
if billing_state not in STATUS_DESCRIPTORS:
raise errors.ValidationException('invalid billing state option specified: %s' % billing_state)
self.params['rbBillingState'] = STATUS_DESCRIPTORS[billing_state]
def set_comments(self, comments):
self.params['trnComments'] = comments
def set_first_date(self, first_date):
self.params['rbFirstBilling'] = first_date.strftime('%m%d%Y')
def set_second_date(self, second_date):
self.params['rbSecondBilling'] = second_date.strftime('%m%d%Y')
def set_expiry(self, expiry):
self.params['rbExpiry'] = expiry.strftime('%m%d%Y')
def set_frequency_period(self, frequency_period):
frequency_period = frequency_period.upper()
if frequency_period not in 'DWMY':
            raise errors.ValidationException('invalid frequency period specified: %s (must be one of DWMY)' % frequency_period)
self.params['rbBillingPeriod'] = frequency_period
def set_frequency_increment(self, frequency_increment):
self.params['rbBillingIncrement'] = frequency_increment
def set_tax1(self, on):
self.params['rbApplyTax1'] = '1' if on else '0'
def set_tax2(self, on):
self.params['rbApplyTax2'] = '1' if on else '0'
def set_taxes(self, on):
self.set_tax1(on)
self.set_tax2(on)
def set_end_month(self, on):
if self.params['rbBillingPeriod'] != 'M':
log.warning('cannot set end_month attribute if billing period is not monthly')
return
self.params['rbBillingEndMonth'] = '1' if on else '0'
def set_never_expires(self, on):
self.params['rbNeverExpires'] = '1' if on else '0'
def set_process_back_payments(self, on):
self.params['processBackPayments'] = '1' if on else '0'
class ModifyRecurringBillingAccountResponse(transaction.Response):
def approved(self):
return self.resp.get('code', [0])[0] == '1'
def message(self):
return self.resp.get('message', [None])[0]
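# Hedged usage sketch (not part of the original module; the gateway object and
# values are hypothetical):
#   txn = CreateRecurringBillingAccount(gateway, '10.00',
#                                       frequency_period='W',
#                                       frequency_increment=1)
#   txn.set_taxes(True)
#   resp = txn.commit()  # commit() is assumed from the Transaction base class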
| {
"content_hash": "2cdc292d96e264d0fd11294608a398ad",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 169,
"avg_line_length": 34.66831683168317,
"alnum_prop": 0.6487219762958732,
"repo_name": "Beanstream-DRWP/beanstream-python",
"id": "2d94a8b48aa0b5dc97d39bfaddc154bd5e8b008f",
"size": "7003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beanstream/recurring_billing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1806487"
}
],
"symlink_target": ""
} |
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import time
import json
from functools import wraps
from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
from flask import session
from flask import flash
import gconf
import user
import logscount
app = Flask(__name__)
app.secret_key = os.urandom(32)
def login_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
if session.get('user') is None:
return redirect('/')
rt = func(*args, **kwargs)
return rt
return wrapper
@app.route('/')
def index():
return render_template('login.html')
@app.route('/login/', methods=['get','post'])
def login():
params = request.args if request.method == 'GET' else request.form
username = params.get('username', '')
password = params.get('password', '')
if user.validate_login(username, password):
session['user'] = {'username': username}
return redirect('/logs/')
else:
        return render_template('login.html', username=username, error='incorrect username or password')
@app.route('/logout/')
def logout():
session.clear()
print session
return redirect('/')
@app.route('/logs/')
@login_required
def logs():
cut = request.args.get('cut')
cut = int(cut) if str(cut).isdigit() else 10
rt_list = logscount.logscount(cut=cut)
    return render_template('logs.html', rt_list=rt_list, logs_info="Please select a file")
@app.route('/useradd/', methods=['post'])
@login_required
def useradd():
params = request.args if request.method == 'GET' else request.form
username = request.form.get('useradd_username', '')
password = request.form.get('useradd_password', '')
age = request.form.get('useradd_age', '')
    gender = request.form.get('useradd_gender', 'male')
email = request.form.get('useradd_email', '')
if username and len(password) >= 6 and age and gender and email:
if user.user_add(username, password, age, gender, email):
            flash('User "%s" added successfully' % username)
# return redirect('/users/')
_is_ok = True
return json.dumps({"is_ok":_is_ok})
else:
            # return render_template('useradd.html', useradd_info='invalid username, password or age')
            _is_ok = False
            return json.dumps({"is_ok": _is_ok, "error": "user already exists"})
else:
_is_ok = False
        return json.dumps({'is_ok': _is_ok, 'error': 'incomplete user info; password must be at least 6 characters'})
@app.route('/userdel/', methods=['post'])
@login_required
def userdel():
user_id = request.form.get('user_id', '')
if user_id:
if user.user_del(user_id):
            flash('User deleted successfully')
# return redirect('/users/')
_is_ok = True
return json.dumps({'is_ok':_is_ok})
else:
            # return render_template('users.html', user_list=user.get_users(), user_info='sorry, failed to delete the user')
            _is_ok = False
            return json.dumps({'is_ok': _is_ok, 'error': 'user does not exist'})
else:
        # return render_template('userdel.html', user_info='error: user does not exist')
        _is_ok = False
        return json.dumps({'is_ok': _is_ok, 'error': 'user ID is empty'})
@app.route('/useredit/', methods=['post'])
@login_required
def useredit():
user_id = request.form.get('user_id', '')
useredit_age = request.form.get('useredit_age', '')
useredit_gender = request.form.get('useredit_gender', '')
useredit_email = request.form.get('useredit_email', '')
if user_id and useredit_age and useredit_gender and useredit_email:
if user.user_edit(age=useredit_age, user_id=user_id, gender=useredit_gender, email=useredit_email):
            flash('User info updated successfully')
_is_ok = True
return json.dumps({'is_ok':_is_ok})
#return redirect('/users/')
else:
            #return render_template('useredit.html', useredit_info='sorry, failed to update user info')
            _is_ok = False
            return json.dumps({'is_ok': _is_ok, 'error': 'failed to update user info'})
else:
        #return render_template('useredit.html', useredit_info='error: user does not exist')
        _is_ok = False
        return json.dumps({'is_ok': _is_ok, 'error': 'user info is incomplete'})
@app.route('/upload_logs/', methods=['get','post'])
@login_required
def upload_logs():
upload_logs = request.files.get('upload_logs')
if upload_logs:
filename=upload_logs.filename + "." + time.strftime("%Y%m%d%H%M%S", time.localtime())
logs_filename = gconf.LOG_PATH + filename
#return logs_filename
upload_logs.save(logs_filename)
logscount.load_data_logs(logs_filename)
        flash('File [%s] processed successfully' % upload_logs.filename)
return redirect('/logs/')
else:
return redirect('/logs/')
@app.route('/users/')
@login_required
def users():
return render_template('users.html', user_list=user.get_users())
@app.route('/updateuserpassword/', methods=['POST'])
@login_required
def charge_user_password():
user_id = request.form.get('userid')
manager_password = request.form.get('manager_password')
user_password = request.form.get('user_password')
    print user_id
_is_ok, _error = user.validate_charge_user_password(user_id, user_password, session['user']['username'], manager_password)
if _is_ok:
user.charge_user_password(user_id, user_password)
return json.dumps({'is_ok':_is_ok, "error":_error})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80,debug=True)
| {
"content_hash": "39dbf6446b25386a7b855675f9e16953",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 123,
"avg_line_length": 31.3125,
"alnum_prop": 0.6790419161676646,
"repo_name": "51reboot/actual_09_homework",
"id": "1aaee2a3487f4a4e6fd479973aecac344316a489",
"size": "5277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "08/hjun/app1/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4623850"
},
{
"name": "HTML",
"bytes": "90670692"
},
{
"name": "JavaScript",
"bytes": "31827839"
},
{
"name": "Nginx",
"bytes": "1073"
},
{
"name": "PHP",
"bytes": "349512"
},
{
"name": "Python",
"bytes": "1705997"
},
{
"name": "Shell",
"bytes": "10001"
},
{
"name": "Smarty",
"bytes": "342164"
}
],
"symlink_target": ""
} |
import win32api
import win32con
import time
import math
def move(x, y):
"""
Moves the cursor to (x, y)
:param x: target x-ordinate
:param y: target y-ordinate
:return: None
"""
win32api.SetCursorPos((x, y))
def move_line(x, y, speed = 1):
"""
Moves the cursor in a straight line to (x, y) at a certain speed
:param x: target x-ordinate
:param y: target y-ordinate
:param speed: pixel traversal rate
:return: None
"""
_x, _y = win32api.GetCursorPos()
if x - _x:
m = (y - _y) / (x - _x)
c = y - (m * x)
for a in range(_x, x + 1, speed) if _x < x else range(_x, x - 1, -speed):
b = int(m * a + c)
move(a, b)
time.sleep(0.01)
else:
for b in range(_y, y + 1, speed) if _y <= y else range(_y, y - 1, -speed):
move(x, b)
time.sleep(0.01)
move(x, y)
def move_arc(x, y, r, speed = 1, orientation = True):
# WARNING: This function currently contains inaccuracy likely due to the rounding of trigonometric functions
"""
Moves the cursor in an arc of radius r to (x, y) at a certain speed
:param x: target x-ordinate
:param y: target y-ordinate
:param r: radius
:param speed: pixel traversal rate
:param orientation: direction of arc
:return: None
"""
_x, _y = win32api.GetCursorPos()
c_len = (r**2 - (((x - _x)/2)**2 + ((y - _y)/2)**2))**0.5
t = (c_len**2/((y - _y)**2 + (x - _x)**2))**0.5
t = t if orientation else -t
centre = ((_x + x)/2 + t*(_x - x), (_y + y)/2 + t*(y - _y))
if any(isinstance(ordinate, complex) for ordinate in centre):
raise ValueError("Radius too low - minimum: {}".format(((x - _x)**2 + (y - _y)**2)**0.5/2))
theta = math.atan2(_y - centre[1], _x - centre[0])
end = math.atan2(y - centre[1], x - centre[0])
while theta < end:
move(*list(map(round, (centre[0] + r*math.cos(theta), centre[1] + r*math.sin(theta)))))
theta += speed/100
time.sleep(0.01)
move(x, y)
def drag(*args, function = move):
"""
Drags the mouse along a specified path
:param args: list of arguments passed to function
:param function: path to traverse
:return: None
"""
x, y = win32api.GetCursorPos()
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)
function(*args)
x, y = win32api.GetCursorPos()
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)
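# Hedged usage sketch (not part of the original module):
#   move_line(500, 500, speed=2)        # straight-line move to (500, 500)
#   drag(500, 500, function=move_line)  # left-drag along the same path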
def click_left(x = None, y = None, hold_time = 0):
"""
Simulates a mouse left click on pixel (x,y) if x and y are provided
If x and y are not passed to this function, a mouse click is simulated at the current (x,y)
:param x: target x-ordinate
:param y: target y-ordinate
:param hold_time: length of time to hold the mouse's left button
:return: None
"""
if not x or not y:
cursor = win32api.GetCursorPos()
if not x:
x = cursor[0]
if not y:
y = cursor[1]
move(x, y)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)
time.sleep(hold_time)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)
def click_right(x = None, y = None, hold_time = 0):
"""
Simulates a mouse right click on pixel (x,y) if x and y are provided
If x and y are not passed to this function, a mouse click is simulated at the current (x,y)
:param x: target x-ordinate
:param y: target y-ordinate
:param hold_time: length of time to hold the mouse's right button
:return: None
"""
if not x or not y:
cursor = win32api.GetCursorPos()
if not x:
x = cursor[0]
if not y:
y = cursor[1]
move(x, y)
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN, x, y, 0, 0)
time.sleep(hold_time)
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP, x, y, 0, 0) | {
"content_hash": "346fc4b069c5c66e0ecac8b5073f116e",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 112,
"avg_line_length": 30.00763358778626,
"alnum_prop": 0.5779699821928262,
"repo_name": "EwilDawe/typy",
"id": "24aac63613caca110c84edc623235057ec683b23",
"size": "3933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "typy/mouse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5016"
}
],
"symlink_target": ""
} |
from pyro.contrib.funsor.handlers.primitives import to_data
from pyro.poutine.replay_messenger import ReplayMessenger as OrigReplayMessenger
class ReplayMessenger(OrigReplayMessenger):
"""
This version of :class:`~ReplayMessenger` is almost identical to the original version,
except that it calls :func:`~pyro.contrib.funsor.to_data` on the replayed funsor values.
This may result in different unpacked shapes, but should produce correct allocations.
"""
def _pyro_sample(self, msg):
name = msg["name"]
msg[
"replay_active"
] = True # indicate replaying so importance weights can be scaled
if self.trace is None:
return
if name in self.trace:
guide_msg = self.trace.nodes[name]
msg["funsor"] = {} if "funsor" not in msg else msg["funsor"]
if guide_msg["type"] != "sample":
raise RuntimeError("site {} must be sample in trace".format(name))
# TODO make this work with sequential enumeration
if guide_msg.get("funsor", {}).get("value", None) is not None:
msg["value"] = to_data(
guide_msg["funsor"]["value"]
) # only difference is here
else:
msg["value"] = guide_msg["value"]
msg["infer"] = guide_msg["infer"]
msg["done"] = True
# indicates that this site was latent and replayed, so its importance weight is p/q
msg["replay_skipped"] = False
else:
# indicates that this site was latent and not replayed, so its importance weight is 1
msg["replay_skipped"] = msg.get("replay_skipped", True)
| {
"content_hash": "fd5fc76dc06d580b8744a48e98a03fed",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 97,
"avg_line_length": 45.23684210526316,
"alnum_prop": 0.6009307737056429,
"repo_name": "uber/pyro",
"id": "2f6a76033aaccfe1f983b79fe8643c75b31736ff",
"size": "1804",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pyro/contrib/funsor/handlers/replay_messenger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "6121"
},
{
"name": "CSS",
"bytes": "478"
},
{
"name": "Dockerfile",
"bytes": "1635"
},
{
"name": "Makefile",
"bytes": "6857"
},
{
"name": "Python",
"bytes": "3388193"
},
{
"name": "Shell",
"bytes": "6465"
},
{
"name": "TeX",
"bytes": "3649"
}
],
"symlink_target": ""
} |
DOCUMENTATION = '''
---
module: dircopy
short_description: Copies directories recursively. Intended to bypass the slowness of the built-in Ansible copy module.
description:
    - Uses the C tar utility to create a single archive, transfers it to the target host(s) (using the Ansible
      built-in copy module), and uses it to check/update the target directory.
options:
src:
description:
- Directory on the source host that will be copied to the destination; The path can be absolute or relative.
required: true
dest:
description:
      - Directory on the destination host that will be synchronized from the source;
        the path should be absolute. If dest doesn't exist, it will be created.
required: true
mode:
description:
- permissions of the target, after execution
type: string
sample: "0644"
required: false
identical:
description:
      - Delete files in dest that don't exist in the src path (checked after transfer, not before).
choices: [ 'yes', 'no' ]
default: 'no'
required: false
owner:
description:
- target ownership
default: the Ansible user
required: false
group:
description:
- target group membership
default: the Ansible user's group
required: false
gzip:
description:
- gzip the directory on transfer (applicable only on directory copying)
default: False
specialx:
description:
      - additionally set the execute bit where owner/group/others already have any rights on the target
verbose:
description:
- module exits with detailed information of updates, removals, etc.
notes:
- tar must be installed on both the local and remote host.
    - The module does not preserve file ownership and permissions (you can set them, or they default to the
      Ansible user and the target's umask)
- The source cannot be "/"
author: "T. Czecher (ct@index.hu)"
'''
EXAMPLES = '''
# Copy of src on the control machine to dest on the remote hosts
dircopy: src=some/relative/path dest=/some/absolute/path
# Create an exact copy (delete files not in src)
dircopy:
src: /tmp/helloworld
dest: /var/www/helloworld
identical: yes
dircopy: src=/tmp/test.tgz dest=/tmp/test
'''
from ansible.module_utils.basic import *
import os
import pwd
import grp
import subprocess
from operator import itemgetter
class File(object):
def __init__(self, path):
path = os.path.abspath(path)
self.path = path if os.path.exists(path) else None
self.uid, self.gid, self.mode = self.get_rights()
def get_rights(self):
if self.path:
st = os.stat(self.path)
mode = str(oct(st.st_mode))[-4:] if self.path else None
return st.st_uid, st.st_gid, mode
else:
return None, None, None
def size(self):
return os.stat(self.path).st_size if self.path else None
def set_owner(self, uid, gid):
os.chown(self.path, uid, gid)
def set_mode(self, mode):
os.chmod(self.path, int(mode, 8))
def get_owner_name(self):
return pwd.getpwuid(os.stat(self.path).st_uid).pw_name
def get_group_name(self):
return grp.getgrgid(os.stat(self.path).st_gid).gr_name
class TarFile(object):
def __init__(self, path, ansible_module, self_created=False):
self.tarfile = path
self.module = ansible_module
self.check_mode = self.module.check_mode
self.self_created = self_created
@staticmethod
def _parse_tar_out(tar_out):
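        # `tar --compare` reports files missing from the target as lines like
        # "tar: some/path: Cannot stat: No such file or directory" (example
        # output, added for clarity); collect the path from each such line.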
missing_files = set()
for line in tar_out.split("\n"):
if not line.strip():
continue
if "Cannot stat" in line:
missing_files.add(line.split(":")[1].strip())
return missing_files
def _runner(self, cmd):
(rc, stdout, error) = self.module.run_command(cmd, use_unsafe_shell=False)
if rc not in (0, 1):
self.module.fail_json(change=False, msg=error)
else:
return stdout + error
def list(self):
stdout = self._runner("tar -tvf %s" % self.tarfile)
if stdout:
lines = filter(None, [line for line in stdout.split("\n")])
tar_listed_dirs = [" ".join(line.split()[5:]) for line in lines if line.split()[0][:1] == "d"]
# tar doesn't list directories contain no file, so
unlisted_dirs = [set(d.split("/")[:-1]) for d in tar_listed_dirs if "/" in d]
            unlisted_dirs = set.union(*unlisted_dirs) if unlisted_dirs else set()
            if unlisted_dirs:
                dirs = set.union(unlisted_dirs, set(tar_listed_dirs))
            else:
                dirs = set(tar_listed_dirs)
paths = set(self._remove_leading_slash([" ".join(line.split()[5:]) for line in lines]))
files = paths - dirs
return files, dirs
return None, None
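    # Illustrative note (added for clarity): list() parses `tar -tvf` lines such
    # as "drwxr-xr-x user/group 0 2017-01-01 12:00 some/dir/", treating every
    # field from the sixth onwards as the archived path.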
@staticmethod
def _remove_leading_slash(a_list):
for i, item in enumerate(a_list):
if item.startswith("/"):
a_list[i] = item[1:]
return a_list
@staticmethod
def _add_leading_slash(a_list):
for i, item in enumerate(a_list):
item = item.strip()
if not item.startswith("/"):
a_list[i] = "/" + item
return a_list
def untar(self, target):
if self.check_mode:
return
command = "tar --preserve-permissions -xf %s -C %s" % (self.tarfile, target)
self._runner(command)
def compare(self, target):
command = "tar --compare --file %s -C %s" % (self.tarfile, target)
tar_out = self._runner(command)
if tar_out:
return self._parse_tar_out(tar_out.strip())
return None
def update(self, target, files2update):
files, dirs = self.list()
if self.check_mode:
return
self._add_leading_slash(list(files2update))
for f in files2update:
command = ['tar', '-xf', self.tarfile, "-C", target, f]
_ = self._runner(command)
def umask2mode(umask):
perms = [i.split("=")[1] for i in umask.split(",")]
perms = ["-" if not i else i for i in perms]
to_num = lambda x: x.replace("r", "4").replace("w", "2").replace("x", "1").replace("-", "0")
mode_num = lambda x: sum([int(i) for i in to_num(x)])
mode_string = "".join([str(mode_num(i)) for i in perms])
return mode_string
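# Hedged example (added for clarity): for `umask -S` output "u=rwx,g=rx,o=rx",
# umask2mode returns the mode string "755".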
def check_permissions(dest, uid, gid, perms, dir_perms):
dest_files, dest_dirs = get_files(dest)
files = [File(os.path.abspath(f)) for f in dest_files]
files = [f for f in files if f.path]
dirs = [File(os.path.abspath(d)) for d in dest_dirs]
dirs = [d for d in dirs if d.path]
files_ownership2update = set([f for f in files if (f.uid != uid)] + [f for f in files if (f.gid != gid)])
dirs_ownership2update = set([d for d in dirs if (d.uid != uid or d.gid != gid)])
ownership2update = list(files_ownership2update) + list(dirs_ownership2update)
files_mode2update = [f for f in files if f.mode != perms]
dirs_mode2update = [d for d in dirs if d.mode != dir_perms]
return ownership2update, files_mode2update, dirs_mode2update
def get_files(target):
files = set()
dirs = list()
for path, sub_dirs, _files in os.walk(target):
if os.path.isdir(path):
dirs.append(path)
for name in _files:
files.add(os.path.abspath(os.path.join(path, name)))
# Correct path endings
for i, d in enumerate(dirs):
dirs[i] = os.path.abspath(d)
return files, set(dirs)
def perms_with_exec(mode_string):
sticky_bit = mode_string[:1] if len(mode_string) == 4 else ""
modes = [m for m in mode_string[-3:]]
new_mode_string = sticky_bit
for m in modes:
mode = int(m)
if mode % 2 == 0:
mode = mode + 1 if mode else 0
new_mode_string += str(mode)
return new_mode_string
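# Hedged example (added for clarity): perms_with_exec("0644") -> "0755", i.e.
# the execute bit is added wherever any permission is already granted.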
def make_identical(target, tarfile):
(files_in_tar, dirs_in_tar) = tarfile.list()
files_in_tar = set([os.path.abspath(target + f) for f in files_in_tar])
dirs_in_tar = set([os.path.abspath(target + f) for f in dirs_in_tar])
(target_files, target_dirs) = get_files(target)
spare_files = target_files - files_in_tar
spare_dirs = target_dirs - dirs_in_tar - set([os.path.abspath(target)])
return spare_files, spare_dirs
def remove_spares(files, dirs):
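    # Remove files first, then directories deepest-first so each directory is
    # empty by the time os.rmdir() reaches it.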
for f in files:
os.remove(os.path.join(f))
dirs_with_path_length = zip(list(dirs), [len(d.split("/")) for d in dirs])
dirs_sorted_by_path = sorted(dirs_with_path_length, key=itemgetter(1), reverse=True)
for d in dirs_sorted_by_path:
try:
os.rmdir(d[0])
except OSError as e:
return e
return None
def main():
argument_spec = {
"src": {"required": True},
"dest": {"required": True},
"owner": {"required": False},
"group": {"required": False},
"mode": {"required": False},
"identical": dict(default=False, aliases=['delete'], type='bool'),
"_arch_root": dict(required=False),
"verbose": dict(default=True, type='bool'),
"remote_tmp": dict(default="/tmp"),
"specialx": dict(default=False, type='bool'),
"_tmpfile": dict(required=False),
"source_is_directory": dict(required=True),
}
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
check_mode = module.check_mode
params = module.params
src = params["src"]
dest = params["dest"]
if dest[-1:] != "/":
dest += "/"
owner = params["owner"]
group = params["group"]
mode = params["mode"]
identical = params["identical"]
verbose = params["verbose"]
specialx = params["specialx"]
    source_is_directory = params["source_is_directory"]
tmpdir = params["remote_tmp"]
tmpfile = os.path.join(tmpdir, params["_tmpfile"])
if owner:
if owner.isdigit():
uid = int(owner)
else:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
module.exit_json(failed=True, changed=False, msg="No such a user: %s" % owner)
if group:
if group.isdigit():
gid = int(group)
else:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
module.exit_json(failed=True, changed=False, msg="No such a group: %s" % group)
else:
gid = pwd.getpwnam(owner).pw_gid
if not mode:
        umask = subprocess.check_output("umask -S", shell=True)
mode = umask2mode(umask.strip())
failed = False
changed = False
files2update = None
removed = False
msg = list()
updated = None
diff = {
'before': dict(updated=dict()),
'after': dict(updated=dict())
}
diff_updated_ownership_before = dict()
diff_updated_mode_before = dict()
tarfile = TarFile(path=tmpfile, ansible_module=module)
exit_cmd = "module.exit_json(failed=failed, changed=changed, msg=dict(enumerate(msg)), diff=diff"
if os.path.exists(dest):
_update_msg = "differ(s)" if check_mode else "updated"
if not os.path.isdir(dest):
failed = True
msg = "Destination (%s) is not a directory" % dest
eval(exit_cmd)
elif os.listdir(dest) != "":
files2update = tarfile.compare(target=dest)
if files2update:
changed = True
for f in files2update:
file_object = File(path=os.path.join(dest, f))
diff['before']['updated'][os.path.join(dest, f)] = file_object.size()
tarfile.update(target=dest, files2update=files2update)
updated = set([os.path.join(dest, d) for d in files2update])
for f in updated:
file_object = File(path=f)
diff_updated_ownership_before[f] = "{}/{}".format(file_object.get_owner_name(),
file_object.get_group_name())
diff_updated_mode_before[f] = file_object.mode
diff['after']['updated'][f] = file_object.size()
msg.append("%s file(s) %s " % (len(files2update), _update_msg))
        if identical and os.listdir(dest):
spare_files, spare_dirs = make_identical(target=dest, tarfile=tarfile)
if (spare_files or spare_dirs) and not check_mode:
failed2remove = remove_spares(files=spare_files, dirs=spare_dirs)
if failed2remove:
module.exit_json(failed=True, changed=False, msg=str(failed2remove))
removed = spare_files | spare_dirs
if removed:
changed = True
_remove_msg = "would be removed" if check_mode else "removed"
msg.append("%s file(s) and %s dir(s) %s" % (len(spare_files), len(spare_dirs), _remove_msg))
if removed:
if check_mode:
diff['before']['removed'] = removed
else:
diff['before']['removed'] = removed
diff['after']['would_be_removed'] = []
dir_mode = perms_with_exec(mode) if specialx else mode
ownership2update, files_mode2update, dirs_mode2update = check_permissions(dest=dest, uid=uid, gid=gid,
perms=mode, dir_perms=dir_mode)
if ownership2update:
changed = True
ownership_before = dict()
ownership_after = dict()
for file_object in ownership2update:
ownership_before[file_object.path] = "{}/{}".\
format(file_object.get_owner_name(), file_object.get_group_name())
ownership_after[file_object.path] = "{}/{}".format(pwd.getpwuid(uid).pw_name, grp.getgrgid(gid).gr_name)
if not check_mode:
file_object.set_owner(uid, gid)
diff['before']['ownership'] = ownership_before
diff['after']['ownership'] = ownership_after
diff['before']['ownership'].update(diff_updated_ownership_before)
diff['before']['mode'] = dict()
diff['after']['mode'] = dict()
mode_before = diff_updated_mode_before
mode_after = dict()
if files_mode2update:
changed = True
for file_object in files_mode2update:
mode_before[file_object.path] = file_object.mode
mode_after[file_object.path] = mode
if not check_mode:
file_object.set_mode(mode)
if dirs_mode2update:
changed = True
for directory in dirs_mode2update:
mode_before[directory.path] = directory.mode
mode_after[directory.path] = dir_mode
if not check_mode:
directory.set_mode(dir_mode)
if files_mode2update or dirs_mode2update:
diff['before']['mode'].update(mode_before)
diff['after']['mode'].update(mode_after)
if not changed:
msg = ["No update needed."]
elif verbose:
if diff['before']['mode']:
msg.append("%s file/dir mode(s) %s" % (len(files_mode2update) + len(dirs_mode2update), _update_msg))
if ownership2update:
msg.append("%s file/dir ownership %s" % (len(ownership2update), _update_msg))
if removed:
if check_mode:
exit_cmd += ", would_be_removed=list(removed)"
else:
exit_cmd += ", removed=list(removed)"
if updated:
if check_mode:
exit_cmd += ", files_to_update=list(updated)"
else:
exit_cmd += ", updated_files=list(updated)"
exit_cmd += ")"
eval(exit_cmd)
# Target doesn't exist -> untar
if not check_mode:
os.mkdir(dest)
tarfile.untar(target=dest)
        dir_mode = perms_with_exec(mode) if specialx else mode
        check_permissions(dest=dest, uid=uid, gid=gid, perms=mode, dir_perms=dir_mode)
changed = True
_msg = "copied" if source_is_directory else "extracted"
msg = "%s %s to %s " % (src, _msg, dest)
updated = True
        if verbose:
            exit_cmd += ", extracted=updated"
    else:
        msg = "Target directory does not exist."
        failed = False
    exit_cmd += ")"
    eval(exit_cmd)
if __name__ == '__main__':
main()
| {
"content_hash": "4e48fe646539046bbb4f2be21e50f51a",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 120,
"avg_line_length": 37.38074398249453,
"alnum_prop": 0.5547620441374466,
"repo_name": "namondo/ansible-dircopy",
"id": "c38a1328946e58708e4f7360d15dcc16f3cf093d",
"size": "17131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/dircopy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22720"
}
],
"symlink_target": ""
} |
"""
This module contains functions for solving for the steady state density matrix
of open quantum systems defined by a Liouvillian or Hamiltonian and a list of
collapse operators.
"""
# The only substantial change is the use of my liouvillian function!
__all__ = ['steadystate', 'steady', 'build_preconditioner',
'pseudo_inverse']
import warnings
import time
import scipy
import numpy as np
from numpy.linalg import svd
from scipy import prod
import scipy.sparse as sp
import scipy.linalg as la
from scipy.sparse.linalg import (use_solver, splu, spilu, spsolve, eigs)
from scipy.sparse.linalg import (LinearOperator, gmres, lgmres, bicgstab)
from quantum.superoperator import (liouvillian, vec2mat, mat2vec, spre,
operator_to_vector)
from qutip.qobj import Qobj, issuper, isoper
from qutip.sparse import (sp_permute, sp_bandwidth, sp_reshape, sp_profile)
from qutip.cy.spmath import zcsr_kron
from qutip.graph import reverse_cuthill_mckee, weighted_bipartite_matching
from qutip import (tensor, identity)
import qutip.settings as settings
from qutip.utilities import _version2int
from qutip.cy.spconvert import dense2D_to_fastcsr_fmode
import qutip.logging_utils
logger = qutip.logging_utils.get_logger()
# Load MKL spsolve if available
if settings.has_mkl:
from qutip._mkl.spsolve import (mkl_splu, mkl_spsolve)
# test if scipy is recent enough to get L & U factors from superLU
_scipy_check = _version2int(scipy.__version__) >= _version2int('0.14.0')
def _empty_info_dict():
def_info = {'perm': [], 'solution_time': None, 'iterations': None,
'residual_norm': None, 'rcm_time': None, 'wbm_time': None,
'iter_time': None, 'precond_time': None, 'ILU_MILU': None,
'fill_factor': None, 'diag_pivot_thresh': None,
'drop_tol': None, 'permc_spec': None, 'weight': None}
return def_info
def _default_steadystate_args():
def_args = {'method': 'direct', 'sparse': True, 'use_rcm': False,
'use_wbm': False, 'weight': None, 'use_precond': False,
'all_states': False, 'M': None, 'x0': None, 'drop_tol': 1e-4,
'fill_factor': 100, 'diag_pivot_thresh': None, 'maxiter': 1000,
'tol': 1e-12, 'permc_spec': 'COLAMD', 'ILU_MILU': 'smilu_2',
'restart': 20, 'return_info': False,
'info': _empty_info_dict(), 'verbose': False}
return def_args
def steadystate(A, c_op_list=[], **kwargs):
"""Calculates the steady state for quantum evolution subject to the
supplied Hamiltonian or Liouvillian operator and (if given a Hamiltonian) a
list of collapse operators.
If the user passes a Hamiltonian then it, along with the list of collapse
operators, will be converted into a Liouvillian operator in Lindblad form.
Parameters
----------
A : qobj
A Hamiltonian or Liouvillian operator.
c_op_list : list
A list of collapse operators.
method : str {'direct', 'eigen', 'iterative-gmres',
'iterative-lgmres', 'iterative-bicgstab', 'svd', 'power',
'power-gmres', 'power-lgmres', 'power-bicgstab'}
Method for solving the underlying linear equation. Direct LU solver
'direct' (default), sparse eigenvalue problem 'eigen',
iterative GMRES method 'iterative-gmres', iterative LGMRES method
'iterative-lgmres', iterative BICGSTAB method 'iterative-bicgstab',
SVD 'svd' (dense), or inverse-power method 'power'. The iterative
power methods 'power-gmres', 'power-lgmres', 'power-bicgstab' use
the same solvers as their direct counterparts.
return_info : bool, optional, default = False
        Return a dictionary of solver-specific information about the
solution and how it was obtained.
sparse : bool, optional, default = True
Solve for the steady state using sparse algorithms. If set to False,
the underlying Liouvillian operator will be converted into a dense
matrix. Use only for 'smaller' systems.
use_rcm : bool, optional, default = False
Use reverse Cuthill-Mckee reordering to minimize fill-in in the
LU factorization of the Liouvillian.
use_wbm : bool, optional, default = False
Use Weighted Bipartite Matching reordering to make the Liouvillian
diagonally dominant. This is useful for iterative preconditioners
only, and is set to ``True`` by default when finding a preconditioner.
weight : float, optional
Sets the size of the elements used for adding the unity trace condition
to the linear solvers. This is set to the average abs value of the
Liouvillian elements if not specified by the user.
x0 : ndarray, optional
ITERATIVE ONLY. Initial guess for solution vector.
maxiter : int, optional, default=1000
ITERATIVE ONLY. Maximum number of iterations to perform.
tol : float, optional, default=1e-12
ITERATIVE ONLY. Tolerance used for terminating solver.
permc_spec : str, optional, default='COLAMD'
ITERATIVE ONLY. Column ordering used internally by superLU for the
'direct' LU decomposition method. Options include 'COLAMD' and
'NATURAL'. If using RCM then this is set to 'NATURAL' automatically
unless explicitly specified.
use_precond : bool optional, default = False
ITERATIVE ONLY. Use an incomplete sparse LU decomposition as a
preconditioner for the 'iterative' GMRES and BICG solvers.
Speeds up convergence time by orders of magnitude in many cases.
M : {sparse matrix, dense matrix, LinearOperator}, optional
ITERATIVE ONLY. Preconditioner for A. The preconditioner should
approximate the inverse of A. Effective preconditioning can
dramatically improve the rate of convergence for iterative methods.
If no preconditioner is given and ``use_precond = True``, then one
is generated automatically.
fill_factor : float, optional, default = 100
ITERATIVE ONLY. Specifies the fill ratio upper bound (>=1) of the iLU
preconditioner. Lower values save memory at the cost of longer
execution times and a possible singular factorization.
drop_tol : float, optional, default = 1e-4
ITERATIVE ONLY. Sets the threshold for the magnitude of preconditioner
        elements that should be dropped. Can be reduced for a coarser
factorization at the cost of an increased number of iterations, and a
possible singular factorization.
diag_pivot_thresh : float, optional, default = None
ITERATIVE ONLY. Sets the threshold between [0,1] for which diagonal
elements are considered acceptable pivot points when using a
preconditioner. A value of zero forces the pivot to be the diagonal
element.
ILU_MILU : str, optional, default = 'smilu_2'
ITERATIVE ONLY. Selects the incomplete LU decomposition method
        algorithm used in creating the preconditioner. Should only be used by
advanced users.
Returns
-------
dm : qobj
Steady state density matrix.
info : dict, optional
Dictionary containing solver-specific information about the solution.
Notes
-----
The SVD method works only for dense operators (i.e. small systems).
"""
ss_args = _default_steadystate_args()
for key in kwargs.keys():
if key in ss_args.keys():
ss_args[key] = kwargs[key]
else:
raise Exception(
"Invalid keyword argument '" + key + "' passed to steadystate.")
# Set column perm to NATURAL if using RCM and not specified by user
if ss_args['use_rcm'] and ('permc_spec' not in kwargs.keys()):
ss_args['permc_spec'] = 'NATURAL'
# Create & check Liouvillian
A = _steadystate_setup(A, c_op_list)
# Set weight parameter to avg abs val in L if not set explicitly
    if 'weight' not in kwargs.keys():
        ss_args['weight'] = np.mean(np.abs(A.data.data.max()))
        ss_args['info']['weight'] = ss_args['weight']
if ss_args['method'] == 'direct':
if ss_args['sparse']:
return _steadystate_direct_sparse(A, ss_args)
else:
return _steadystate_direct_dense(A, ss_args)
elif ss_args['method'] == 'eigen':
return _steadystate_eigen(A, ss_args)
elif ss_args['method'] in ['iterative-gmres',
'iterative-lgmres', 'iterative-bicgstab']:
return _steadystate_iterative(A, ss_args)
elif ss_args['method'] == 'svd':
return _steadystate_svd_dense(A, ss_args)
elif ss_args['method'] in ['power', 'power-gmres',
'power-lgmres', 'power-bicgstab']:
return _steadystate_power(A, ss_args)
else:
raise ValueError('Invalid method argument for steadystate.')
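# Hedged usage sketch (not part of this module): for a damped oscillator built
# from qutip operators (assuming `import qutip` alongside the imports above):
#   a = qutip.destroy(10)
#   H = a.dag() * a
#   rho_ss = steadystate(H, [np.sqrt(0.1) * a], method='direct')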
def _steadystate_setup(A, c_op_list):
"""Build Liouvillian (if necessary) and check input.
"""
if isoper(A):
if len(c_op_list) > 0:
return liouvillian(A, c_op_list)
raise TypeError('Cannot calculate the steady state for a ' +
'non-dissipative system ' +
'(no collapse operators given)')
elif issuper(A):
return A
else:
raise TypeError('Solving for steady states requires ' +
'Liouvillian (super) operators')
def _steadystate_LU_liouvillian(L, ss_args, has_mkl=0):
"""Creates modified Liouvillian for LU based SS methods.
"""
perm = None
perm2 = None
rev_perm = None
n = int(np.sqrt(L.shape[0]))
form = 'csr'
if has_mkl:
L = L.data + sp.csr_matrix(
(ss_args['weight'] * np.ones(n), (np.zeros(n), [nn * (n + 1)
for nn in range(n)])),
shape=(n ** 2, n ** 2))
else:
form = 'csc'
L = L.data.tocsc() + sp.csc_matrix(
(ss_args['weight'] * np.ones(n), (np.zeros(n), [nn * (n + 1)
for nn in range(n)])),
shape=(n ** 2, n ** 2))
if settings.debug:
old_band = sp_bandwidth(L)[0]
old_pro = sp_profile(L)[0]
logger.debug('Orig. NNZ: %i' % L.nnz)
if ss_args['use_rcm']:
logger.debug('Original bandwidth: %i' % old_band)
if ss_args['use_wbm']:
if settings.debug:
logger.debug('Calculating Weighted Bipartite Matching ordering...')
_wbm_start = time.time()
perm = weighted_bipartite_matching(L)
_wbm_end = time.time()
L = sp_permute(L, perm, [], form)
ss_args['info']['perm'].append('wbm')
ss_args['info']['wbm_time'] = _wbm_end - _wbm_start
if settings.debug:
wbm_band = sp_bandwidth(L)[0]
logger.debug('WBM bandwidth: %i' % wbm_band)
if ss_args['use_rcm']:
if settings.debug:
logger.debug('Calculating Reverse Cuthill-Mckee ordering...')
_rcm_start = time.time()
perm2 = reverse_cuthill_mckee(L)
_rcm_end = time.time()
rev_perm = np.argsort(perm2)
L = sp_permute(L, perm2, perm2, form)
ss_args['info']['perm'].append('rcm')
ss_args['info']['rcm_time'] = _rcm_end - _rcm_start
if settings.debug:
rcm_band = sp_bandwidth(L)[0]
rcm_pro = sp_profile(L)[0]
logger.debug('RCM bandwidth: %i' % rcm_band)
logger.debug('Bandwidth reduction factor: %f' %
(old_band / rcm_band))
logger.debug('Profile reduction factor: %f' %
(old_pro / rcm_pro))
L.sort_indices()
return L, perm, perm2, rev_perm, ss_args
def steady(L, maxiter=10, tol=1e-12, itertol=1e-15, method='solve',
use_precond=False):
"""
Deprecated. See steadystate instead.
"""
message = "steady has been deprecated, use steadystate instead"
warnings.warn(message, DeprecationWarning)
return steadystate(L, [], maxiter=maxiter, tol=tol, use_precond=use_precond)
def _steadystate_direct_sparse(L, ss_args):
"""
Direct solver that uses scipy sparse matrices
"""
if settings.debug:
logger.debug('Starting direct LU solver.')
dims = L.dims[0]
n = int(np.sqrt(L.shape[0]))
b = np.zeros(n ** 2, dtype=complex)
b[0] = ss_args['weight']
if settings.has_mkl:
has_mkl = 1
else:
has_mkl = 0
L, perm, perm2, rev_perm, ss_args = _steadystate_LU_liouvillian(
L, ss_args, has_mkl)
if np.any(perm):
b = b[np.ix_(perm,)]
if np.any(perm2):
b = b[np.ix_(perm2,)]
ss_args['info']['permc_spec'] = ss_args['permc_spec']
ss_args['info']['drop_tol'] = ss_args['drop_tol']
ss_args['info']['diag_pivot_thresh'] = ss_args['diag_pivot_thresh']
ss_args['info']['fill_factor'] = ss_args['fill_factor']
ss_args['info']['ILU_MILU'] = ss_args['ILU_MILU']
if not has_mkl:
# Use superLU solver
orig_nnz = L.nnz
_direct_start = time.time()
lu = splu(L, permc_spec=ss_args['permc_spec'],
diag_pivot_thresh=ss_args['diag_pivot_thresh'],
options=dict(ILU_MILU=ss_args['ILU_MILU']))
v = lu.solve(b)
_direct_end = time.time()
ss_args['info']['solution_time'] = _direct_end - _direct_start
if (settings.debug or ss_args['return_info']) and _scipy_check:
L_nnz = lu.L.nnz
U_nnz = lu.U.nnz
ss_args['info']['l_nnz'] = L_nnz
ss_args['info']['u_nnz'] = U_nnz
ss_args['info']['lu_fill_factor'] = (L_nnz + U_nnz) / L.nnz
if settings.debug:
logger.debug('L NNZ: %i ; U NNZ: %i' % (L_nnz, U_nnz))
logger.debug('Fill factor: %f' % ((L_nnz + U_nnz) / orig_nnz))
else: # Use MKL solver
if len(ss_args['info']['perm']) != 0:
in_perm = np.arange(n**2, dtype=np.int32)
else:
in_perm = None
_direct_start = time.time()
v = mkl_spsolve(L, b, perm=in_perm, verbose=ss_args['verbose'])
_direct_end = time.time()
ss_args['info']['solution_time'] = _direct_end - _direct_start
if ss_args['return_info']:
ss_args['info']['residual_norm'] = la.norm(b - L * v, np.inf)
if ss_args['use_rcm']:
v = v[np.ix_(rev_perm,)]
data = dense2D_to_fastcsr_fmode(vec2mat(v), n, n)
data = 0.5 * (data + data.H)
if ss_args['return_info']:
return Qobj(data, dims=dims, isherm=True), ss_args['info']
else:
return Qobj(data, dims=dims, isherm=True)
def _steadystate_direct_dense(L, ss_args):
"""
Direct solver that use numpy dense matrices. Suitable for
small system, with a few states.
"""
if settings.debug:
logger.debug('Starting direct dense solver.')
dims = L.dims[0]
n = int(np.sqrt(L.shape[0]))
b = np.zeros(n ** 2)
b[0] = ss_args['weight']
L = L.data.todense()
L[0, :] = np.diag(ss_args['weight'] * np.ones(n)).reshape((1, n ** 2))
_dense_start = time.time()
v = np.linalg.solve(L, b)
_dense_end = time.time()
ss_args['info']['solution_time'] = _dense_end - _dense_start
if ss_args['return_info']:
ss_args['info']['residual_norm'] = la.norm(b - L * v, np.inf)
data = vec2mat(v)
data = 0.5 * (data + data.conj().T)
return Qobj(data, dims=dims, isherm=True)
def _steadystate_eigen(L, ss_args):
"""
Internal function for solving the steady state problem by
finding the eigenvector corresponding to the zero eigenvalue
of the Liouvillian using ARPACK.
"""
ss_args['info'].pop('weight', None)
if settings.debug:
logger.debug('Starting Eigen solver.')
dims = L.dims[0]
L = L.data.tocsc()
if ss_args['use_rcm']:
ss_args['info']['perm'].append('rcm')
if settings.debug:
old_band = sp_bandwidth(L)[0]
logger.debug('Original bandwidth: %i' % old_band)
perm = reverse_cuthill_mckee(L)
rev_perm = np.argsort(perm)
L = sp_permute(L, perm, perm, 'csc')
if settings.debug:
rcm_band = sp_bandwidth(L)[0]
logger.debug('RCM bandwidth: %i' % rcm_band)
logger.debug('Bandwidth reduction factor: %f' %
(old_band / rcm_band))
_eigen_start = time.time()
eigval, eigvec = eigs(L, k=1, sigma=1e-15, tol=ss_args['tol'],
which='LM', maxiter=ss_args['maxiter'])
_eigen_end = time.time()
ss_args['info']['solution_time'] = _eigen_end - _eigen_start
if ss_args['return_info']:
ss_args['info']['residual_norm'] = la.norm(L * eigvec, np.inf)
if ss_args['use_rcm']:
eigvec = eigvec[np.ix_(rev_perm,)]
_temp = vec2mat(eigvec)
data = dense2D_to_fastcsr_fmode(_temp, _temp.shape[0], _temp.shape[1])
data = 0.5 * (data + data.H)
out = Qobj(data, dims=dims, isherm=True)
if ss_args['return_info']:
return out / out.tr(), ss_args['info']
else:
return out / out.tr()
def _iterative_precondition(A, n, ss_args):
"""
Internal function for preconditioning the steadystate problem for use
with iterative solvers.
"""
if settings.debug:
logger.debug('Starting preconditioner.')
_precond_start = time.time()
try:
P = spilu(A, permc_spec=ss_args['permc_spec'],
drop_tol=ss_args['drop_tol'],
diag_pivot_thresh=ss_args['diag_pivot_thresh'],
fill_factor=ss_args['fill_factor'],
options=dict(ILU_MILU=ss_args['ILU_MILU']))
def P_x(x): return P.solve(x)
M = LinearOperator((n ** 2, n ** 2), matvec=P_x)
_precond_end = time.time()
ss_args['info']['permc_spec'] = ss_args['permc_spec']
ss_args['info']['drop_tol'] = ss_args['drop_tol']
ss_args['info']['diag_pivot_thresh'] = ss_args['diag_pivot_thresh']
ss_args['info']['fill_factor'] = ss_args['fill_factor']
ss_args['info']['ILU_MILU'] = ss_args['ILU_MILU']
ss_args['info']['precond_time'] = _precond_end - _precond_start
if settings.debug or ss_args['return_info']:
if settings.debug:
logger.debug('Preconditioning succeeded.')
logger.debug('Precond. time: %f' %
(_precond_end - _precond_start))
if _scipy_check:
L_nnz = P.L.nnz
U_nnz = P.U.nnz
ss_args['info']['l_nnz'] = L_nnz
ss_args['info']['u_nnz'] = U_nnz
ss_args['info']['ilu_fill_factor'] = (L_nnz + U_nnz) / A.nnz
e = np.ones(n ** 2, dtype=int)
condest = la.norm(M * e, np.inf)
ss_args['info']['ilu_condest'] = condest
if settings.debug:
logger.debug('L NNZ: %i ; U NNZ: %i' % (L_nnz, U_nnz))
logger.debug('Fill factor: %f' % ((L_nnz + U_nnz) / A.nnz))
logger.debug('iLU condest: %f' % condest)
except:
raise Exception("Failed to build preconditioner. Try increasing " +
"fill_factor and/or drop_tol.")
return M, ss_args
def _steadystate_iterative(L, ss_args):
"""
Iterative steady state solver using the GMRES, LGMRES, or BICGSTAB
algorithm and a sparse incomplete LU preconditioner.
"""
ss_iters = {'iter': 0}
def _iter_count(r):
ss_iters['iter'] += 1
return
if settings.debug:
logger.debug('Starting %s solver.' % ss_args['method'])
dims = L.dims[0]
n = int(np.sqrt(L.shape[0]))
b = np.zeros(n ** 2)
b[0] = ss_args['weight']
L, perm, perm2, rev_perm, ss_args = _steadystate_LU_liouvillian(L, ss_args)
if np.any(perm):
b = b[np.ix_(perm,)]
if np.any(perm2):
b = b[np.ix_(perm2,)]
use_solver(assumeSortedIndices=True)
if ss_args['M'] is None and ss_args['use_precond']:
ss_args['M'], ss_args = _iterative_precondition(L, n, ss_args)
if ss_args['M'] is None:
warnings.warn("Preconditioning failed. Continuing without.",
UserWarning)
# Select iterative solver type
_iter_start = time.time()
if ss_args['method'] == 'iterative-gmres':
v, check = gmres(L, b, tol=ss_args['tol'], M=ss_args['M'],
x0=ss_args['x0'], restart=ss_args['restart'],
maxiter=ss_args['maxiter'], callback=_iter_count)
elif ss_args['method'] == 'iterative-lgmres':
v, check = lgmres(L, b, tol=ss_args['tol'], M=ss_args['M'],
x0=ss_args['x0'], maxiter=ss_args['maxiter'],
callback=_iter_count)
elif ss_args['method'] == 'iterative-bicgstab':
v, check = bicgstab(L, b, tol=ss_args['tol'], M=ss_args['M'],
x0=ss_args['x0'],
maxiter=ss_args['maxiter'], callback=_iter_count)
else:
raise Exception("Invalid iterative solver method.")
_iter_end = time.time()
ss_args['info']['iter_time'] = _iter_end - _iter_start
if ss_args['info']['precond_time'] is not None:
ss_args['info']['solution_time'] = (ss_args['info']['iter_time'] +
ss_args['info']['precond_time'])
else:
ss_args['info']['solution_time'] = ss_args['info']['iter_time']
ss_args['info']['iterations'] = ss_iters['iter']
if ss_args['return_info']:
ss_args['info']['residual_norm'] = la.norm(b - L * v, np.inf)
if settings.debug:
logger.debug('Number of Iterations: %i' % ss_iters['iter'])
logger.debug('Iteration. time: %f' % (_iter_end - _iter_start))
if check > 0:
raise Exception("Steadystate error: Did not reach tolerance after " +
str(ss_args['maxiter']) + " steps." +
"\nResidual norm: " +
str(ss_args['info']['residual_norm']))
elif check < 0:
raise Exception(
"Steadystate error: Failed with fatal error: " + str(check) + ".")
if ss_args['use_rcm']:
v = v[np.ix_(rev_perm,)]
data = vec2mat(v)
data = 0.5 * (data + data.conj().T)
if ss_args['return_info']:
return Qobj(data, dims=dims, isherm=True), ss_args['info']
else:
return Qobj(data, dims=dims, isherm=True)
def _steadystate_svd_dense(L, ss_args):
"""
Find the steady state(s) of an open quantum system by solving for the
nullspace of the Liouvillian.
"""
ss_args['info'].pop('weight', None)
atol = 1e-12
rtol = 1e-12
if settings.debug:
logger.debug('Starting SVD solver.')
_svd_start = time.time()
u, s, vh = svd(L.full(), full_matrices=False)
tol = max(atol, rtol * s[0])
nnz = (s >= tol).sum()
ns = vh[nnz:].conj().T
_svd_end = time.time()
ss_args['info']['solution_time'] = _svd_end - _svd_start
if ss_args['all_states']:
rhoss_list = []
for n in range(ns.shape[1]):
rhoss = Qobj(vec2mat(ns[:, n]), dims=L.dims[0])
rhoss_list.append(rhoss / rhoss.tr())
        if ss_args['return_info']:
            return rhoss_list, ss_args['info']
        else:
            return rhoss_list
else:
rhoss = Qobj(vec2mat(ns[:, 0]), dims=L.dims[0])
return rhoss / rhoss.tr()
def _steadystate_power_liouvillian(L, ss_args, has_mkl=0):
"""Creates modified Liouvillian for power based SS methods.
"""
perm = None
perm2 = None
rev_perm = None
n = L.shape[0]
if has_mkl:
L = L.data - (1e-15) * sp.eye(n, n, format='csr')
kind = 'csr'
else:
L = L.data.tocsc() - (1e-15) * sp.eye(n, n, format='csc')
kind = 'csc'
orig_nnz = L.nnz
if settings.debug:
old_band = sp_bandwidth(L)[0]
old_pro = sp_profile(L)[0]
logger.debug('Original bandwidth: %i' % old_band)
logger.debug('Original profile: %i' % old_pro)
if ss_args['use_wbm']:
if settings.debug:
logger.debug('Calculating Weighted Bipartite Matching ordering...')
_wbm_start = time.time()
perm = weighted_bipartite_matching(L)
_wbm_end = time.time()
L = sp_permute(L, perm, [], kind)
ss_args['info']['perm'].append('wbm')
ss_args['info']['wbm_time'] = _wbm_end - _wbm_start
if settings.debug:
wbm_band = sp_bandwidth(L)[0]
wbm_pro = sp_profile(L)[0]
logger.debug('WBM bandwidth: %i' % wbm_band)
logger.debug('WBM profile: %i' % wbm_pro)
if ss_args['use_rcm']:
if settings.debug:
logger.debug('Calculating Reverse Cuthill-Mckee ordering...')
ss_args['info']['perm'].append('rcm')
_rcm_start = time.time()
perm2 = reverse_cuthill_mckee(L)
_rcm_end = time.time()
ss_args['info']['rcm_time'] = _rcm_end - _rcm_start
rev_perm = np.argsort(perm2)
L = sp_permute(L, perm2, perm2, kind)
if settings.debug:
new_band = sp_bandwidth(L)[0]
new_pro = sp_profile(L)[0]
logger.debug('RCM bandwidth: %i' % new_band)
logger.debug('Bandwidth reduction factor: %f' %
(old_band / new_band))
logger.debug('RCM profile: %i' % new_pro)
logger.debug('Profile reduction factor: %f' % (old_pro / new_pro))
L.sort_indices()
return L, perm, perm2, rev_perm, ss_args
def _steadystate_power(L, ss_args):
"""
Inverse power method for steady state solving.
"""
ss_args['info'].pop('weight', None)
if settings.debug:
logger.debug('Starting iterative inverse-power method solver.')
tol = ss_args['tol']
maxiter = ss_args['maxiter']
use_solver(assumeSortedIndices=True)
rhoss = Qobj()
sflag = issuper(L)
if sflag:
rhoss.dims = L.dims[0]
else:
rhoss.dims = [L.dims[0], 1]
n = L.shape[0]
# Build Liouvillian
if settings.has_mkl and ss_args['method'] == 'power':
has_mkl = 1
else:
has_mkl = 0
L, perm, perm2, rev_perm, ss_args = _steadystate_power_liouvillian(L,
ss_args, has_mkl)
orig_nnz = L.nnz
# start with all ones as RHS
v = np.ones(n, dtype=complex)
if ss_args['use_rcm']:
v = v[np.ix_(perm2,)]
# Do preconditioning
if ss_args['M'] is None and ss_args['use_precond'] and \
ss_args['method'] in ['power-gmres',
'power-lgmres', 'power-bicgstab']:
ss_args['M'], ss_args = _iterative_precondition(
L, int(np.sqrt(n)), ss_args)
if ss_args['M'] is None:
warnings.warn("Preconditioning failed. Continuing without.",
UserWarning)
ss_iters = {'iter': 0}
def _iter_count(r):
ss_iters['iter'] += 1
return
_power_start = time.time()
# Get LU factors
if ss_args['method'] == 'power':
if settings.has_mkl:
lu = mkl_splu(L)
else:
lu = splu(L, permc_spec=ss_args['permc_spec'],
diag_pivot_thresh=ss_args['diag_pivot_thresh'],
options=dict(ILU_MILU=ss_args['ILU_MILU']))
if settings.debug and _scipy_check:
L_nnz = lu.L.nnz
U_nnz = lu.U.nnz
logger.debug('L NNZ: %i ; U NNZ: %i' % (L_nnz, U_nnz))
logger.debug('Fill factor: %f' % ((L_nnz + U_nnz) / orig_nnz))
it = 0
_tol = max(ss_args['tol'] / 10, 1e-15) # Should make this user accessible
while (la.norm(L * v, np.inf) > tol) and (it < maxiter):
if ss_args['method'] == 'power':
v = lu.solve(v)
elif ss_args['method'] == 'power-gmres':
v, check = gmres(L, v, tol=_tol, M=ss_args['M'],
x0=ss_args['x0'], restart=ss_args['restart'],
maxiter=ss_args['maxiter'], callback=_iter_count)
elif ss_args['method'] == 'power-lgmres':
v, check = lgmres(L, v, tol=_tol, M=ss_args['M'],
x0=ss_args['x0'], maxiter=ss_args['maxiter'],
callback=_iter_count)
elif ss_args['method'] == 'power-bicgstab':
v, check = bicgstab(L, v, tol=_tol, M=ss_args['M'],
x0=ss_args['x0'],
maxiter=ss_args['maxiter'], callback=_iter_count)
else:
raise Exception("Invalid iterative solver method.")
v = v / la.norm(v, np.inf)
it += 1
if ss_args['method'] == 'power' and settings.has_mkl:
lu.delete()
if it >= maxiter:
raise Exception('Failed to find steady state after ' +
str(maxiter) + ' iterations')
_power_end = time.time()
ss_args['info']['solution_time'] = _power_end - _power_start
ss_args['info']['iterations'] = it
if ss_args['return_info']:
ss_args['info']['residual_norm'] = la.norm(L * v, np.inf)
if settings.debug:
logger.debug('Number of iterations: %i' % it)
if ss_args['use_rcm']:
v = v[np.ix_(rev_perm,)]
# normalise according to type of problem
if sflag:
trow = v[::rhoss.shape[0] + 1]
data = v / np.sum(trow)
else:
        data = v / la.norm(v)
data = dense2D_to_fastcsr_fmode(
vec2mat(data), rhoss.shape[0], rhoss.shape[0])
rhoss.data = 0.5 * (data + data.H)
rhoss.isherm = True
if ss_args['return_info']:
return rhoss, ss_args['info']
else:
return rhoss
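# The bare core of the inverse-power iteration above, stripped of the
# permutations, MKL hooks, and iterative solvers (a sketch; `A` and `tol`
# are hypothetical, numpy only):
#
#     v = np.ones(A.shape[0], dtype=complex)
#     while np.linalg.norm(A.dot(v), np.inf) > tol:
#         v = np.linalg.solve(A, v)
#         v /= np.linalg.norm(v, np.inf)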
def build_preconditioner(A, c_op_list=[], **kwargs):
"""Constructs a iLU preconditioner necessary for solving for
the steady state density matrix using the iterative linear solvers
in the 'steadystate' function.
Parameters
----------
A : qobj
A Hamiltonian or Liouvillian operator.
c_op_list : list
A list of collapse operators.
return_info : bool, optional, default = False
        Return a dictionary of solver-specific information about the
solution and how it was obtained.
use_rcm : bool, optional, default = False
Use reverse Cuthill-Mckee reordering to minimize fill-in in the
LU factorization of the Liouvillian.
use_wbm : bool, optional, default = False
Use Weighted Bipartite Matching reordering to make the Liouvillian
diagonally dominant. This is useful for iterative preconditioners
only, and is set to ``True`` by default when finding a preconditioner.
weight : float, optional
Sets the size of the elements used for adding the unity trace condition
to the linear solvers. This is set to the average abs value of the
Liouvillian elements if not specified by the user.
method : str, default = 'iterative'
Tells the preconditioner what type of Liouvillian to build for
iLU factorization. For direct iterative methods use 'iterative'.
For power iterative methods use 'power'.
permc_spec : str, optional, default='COLAMD'
Column ordering used internally by superLU for the
'direct' LU decomposition method. Options include 'COLAMD' and
'NATURAL'. If using RCM then this is set to 'NATURAL' automatically
unless explicitly specified.
fill_factor : float, optional, default = 100
Specifies the fill ratio upper bound (>=1) of the iLU
preconditioner. Lower values save memory at the cost of longer
execution times and a possible singular factorization.
drop_tol : float, optional, default = 1e-4
Sets the threshold for the magnitude of preconditioner
        elements that should be dropped. Can be reduced for a coarser
factorization at the cost of an increased number of iterations, and a
possible singular factorization.
diag_pivot_thresh : float, optional, default = None
Sets the threshold between [0,1] for which diagonal
elements are considered acceptable pivot points when using a
preconditioner. A value of zero forces the pivot to be the diagonal
element.
ILU_MILU : str, optional, default = 'smilu_2'
        Selects the incomplete LU decomposition algorithm used in
        creating the preconditioner. Should only be used by advanced users.
Returns
-------
lu : object
Returns a SuperLU object representing iLU preconditioner.
info : dict, optional
Dictionary containing solver-specific information.
"""
ss_args = _default_steadystate_args()
ss_args['method'] = 'iterative'
for key in kwargs.keys():
if key in ss_args.keys():
ss_args[key] = kwargs[key]
else:
raise Exception("Invalid keyword argument '" + key +
"' passed to steadystate.")
# Set column perm to NATURAL if using RCM and not specified by user
if ss_args['use_rcm'] and ('permc_spec' not in kwargs.keys()):
ss_args['permc_spec'] = 'NATURAL'
L = _steadystate_setup(A, c_op_list)
# Set weight parameter to avg abs val in L if not set explicitly
if 'weight' not in kwargs.keys():
ss_args['weight'] = np.mean(np.abs(L.data.data.max()))
ss_args['info']['weight'] = ss_args['weight']
n = int(np.sqrt(L.shape[0]))
if ss_args['method'] == 'iterative':
L, perm, perm2, rev_perm, ss_args = _steadystate_LU_liouvillian(
L, ss_args)
elif ss_args['method'] == 'power':
L, perm, perm2, rev_perm, ss_args = _steadystate_power_liouvillian(
L, ss_args)
else:
raise Exception("Invalid preconditioning method.")
M, ss_args = _iterative_precondition(L, n, ss_args)
if ss_args['return_info']:
return M, ss_args['info']
else:
return M
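# A hedged usage sketch (assumes a hypothetical driven two-level system with
# qutip-style sigmax()/sigmam() in scope; the returned M is then fed back to
# steadystate() via its M keyword):
#
#     H = 2 * np.pi * 0.1 * sigmax()
#     c_op = np.sqrt(0.05) * sigmam()
#     M = build_preconditioner(H, [c_op], use_rcm=True)
#     rho_ss = steadystate(H, [c_op], method='iterative-gmres', M=M)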
def _pseudo_inverse_dense(L, rhoss, w=None, **pseudo_args):
"""
Internal function for computing the pseudo inverse of an Liouvillian using
dense matrix methods. See pseudo_inverse for details.
"""
rho_vec = np.transpose(mat2vec(rhoss.full()))
tr_mat = tensor([identity(n) for n in L.dims[0][0]])
tr_vec = np.transpose(mat2vec(tr_mat.full()))
N = np.prod(L.dims[0][0])
I = np.identity(N * N)
P = np.kron(np.transpose(rho_vec), tr_vec)
Q = I - P
    if w is not None:
        L = 1.0j * w * spre(tr_mat) + L
    if pseudo_args['method'] == 'direct':
        try:
            LIQ = np.linalg.solve(L.full(), Q)
        except np.linalg.LinAlgError:
            # Fall back to a least-squares solution if L is singular.
            LIQ = np.linalg.lstsq(L.full(), Q)[0]
R = np.dot(Q, LIQ)
return Qobj(R, dims=L.dims)
elif pseudo_args['method'] == 'numpy':
return Qobj(np.dot(Q, np.dot(np.linalg.pinv(L.full()), Q)), dims=L.dims)
elif pseudo_args['method'] == 'scipy':
# return Qobj(la.pinv(L.full()), dims=L.dims)
return Qobj(np.dot(Q, np.dot(la.pinv(L.full()), Q)), dims=L.dims)
elif pseudo_args['method'] == 'scipy2':
# return Qobj(la.pinv2(L.full()), dims=L.dims)
return Qobj(np.dot(Q, np.dot(la.pinv2(L.full()), Q)), dims=L.dims)
    else:
        raise ValueError("Unsupported method '%s'. Use 'direct', 'numpy', "
                         "'scipy' or 'scipy2'." % pseudo_args['method'])
def _pseudo_inverse_sparse(L, rhoss, w=None, **pseudo_args):
"""
Internal function for computing the pseudo inverse of an Liouvillian using
sparse matrix methods. See pseudo_inverse for details.
"""
N = np.prod(L.dims[0][0])
rhoss_vec = operator_to_vector(rhoss)
tr_op = tensor([identity(n) for n in L.dims[0][0]])
tr_op_vec = operator_to_vector(tr_op)
P = zcsr_kron(rhoss_vec.data, tr_op_vec.data.T)
I = sp.eye(N * N, N * N, format='csr')
Q = I - P
if w is None:
L = 1.0j * (1e-15) * spre(tr_op) + L
else:
if w != 0.0:
L = 1.0j * w * spre(tr_op) + L
else:
L = 1.0j * (1e-15) * spre(tr_op) + L
if pseudo_args['use_rcm']:
perm = reverse_cuthill_mckee(L.data)
A = sp_permute(L.data, perm, perm)
Q = sp_permute(Q, perm, perm)
else:
if not settings.has_mkl:
A = L.data.tocsc()
A.sort_indices()
if pseudo_args['method'] == 'splu':
if settings.has_mkl:
A = L.data.tocsr()
A.sort_indices()
LIQ = mkl_spsolve(A, Q.toarray())
else:
lu = sp.linalg.splu(A, permc_spec=pseudo_args['permc_spec'],
diag_pivot_thresh=pseudo_args['diag_pivot_thresh'],
options=dict(ILU_MILU=pseudo_args['ILU_MILU']))
LIQ = lu.solve(Q.toarray())
elif pseudo_args['method'] == 'spilu':
lu = sp.linalg.spilu(A, permc_spec=pseudo_args['permc_spec'],
fill_factor=pseudo_args['fill_factor'],
drop_tol=pseudo_args['drop_tol'])
LIQ = lu.solve(Q.toarray())
else:
raise ValueError("unsupported method '%s'" % method)
R = sp.csr_matrix(Q * LIQ)
if pseudo_args['use_rcm']:
rev_perm = np.argsort(perm)
R = sp_permute(R, rev_perm, rev_perm, 'csr')
return Qobj(R, dims=L.dims)
def pseudo_inverse(L, rhoss=None, w=None, sparse=True, **kwargs):
"""
Compute the pseudo inverse for a Liouvillian superoperator, optionally
given its steady state density matrix (which will be computed if not given).
    Parameters
    ----------
L : Qobj
A Liouvillian superoperator for which to compute the pseudo inverse.
rhoss : Qobj
A steadystate density matrix as Qobj instance, for the Liouvillian
superoperator L.
w : double
        Frequency at which to evaluate the pseudo-inverse. Can be zero for
        dense systems and large sparse systems. Small sparse systems can
        fail for zero frequencies.
sparse : bool
Flag that indicate whether to use sparse or dense matrix methods when
computing the pseudo inverse.
    method : string
        Name of method to use. For sparse=True, allowed values are 'splu'
        and 'spilu'. For sparse=False, allowed values are 'direct', 'numpy',
        'scipy' and 'scipy2'.
kwargs : dictionary
Additional keyword arguments for setting parameters for solver methods.
Returns
-------
R : Qobj
Returns a Qobj instance representing the pseudo inverse of L.
Note
----
In general the inverse of a sparse matrix will be dense. If you
are applying the inverse to a density matrix then it is better to
cast the problem as an Ax=b type problem where the explicit calculation
of the inverse is not required. See page 67 of "Electrons in nanostructures"
C. Flindt, PhD Thesis available online:
http://orbit.dtu.dk/fedora/objects/orbit:82314/datastreams/file_4732600/content
    Note also that the definition of the pseudo-inverse herein is different
    from numpy's pinv() alone, as it includes pre- and post-projection onto
    the subspace defined by the projector Q.
"""
pseudo_args = _default_steadystate_args()
for key in kwargs.keys():
if key in pseudo_args.keys():
pseudo_args[key] = kwargs[key]
else:
raise Exception(
"Invalid keyword argument '" + key + "' passed to pseudo_inverse.")
if 'method' not in kwargs.keys():
pseudo_args['method'] = 'splu'
# Set column perm to NATURAL if using RCM and not specified by user
if pseudo_args['use_rcm'] and ('permc_spec' not in kwargs.keys()):
pseudo_args['permc_spec'] = 'NATURAL'
if rhoss is None:
rhoss = steadystate(L, **pseudo_args)
if sparse:
return _pseudo_inverse_sparse(L, rhoss, w=w, **pseudo_args)
else:
pseudo_args['method'] = pseudo_args['method'] if pseudo_args['method'] != 'splu' else 'direct'
return _pseudo_inverse_dense(L, rhoss, w=w, **pseudo_args)
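# A hedged usage sketch (hypothetical Hamiltonian H and collapse operator c;
# liouvillian() and steadystate() assumed in scope):
#
#     L = liouvillian(H, [c])
#     rho_ss = steadystate(L)
#     R = pseudo_inverse(L, rho_ss, method='splu')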
| {
"content_hash": "392034e52313c0195904445d8280f4df",
"timestamp": "",
"source": "github",
"line_count": 1059,
"max_line_length": 102,
"avg_line_length": 38.25023607176582,
"alnum_prop": 0.5801713284123732,
"repo_name": "diego-bernal/quantum",
"id": "45a9935188f9859457092b314576102cea0acd7e",
"size": "43912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantum/steadystate.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "146874"
}
],
"symlink_target": ""
} |
"""Input layer code (`Input` and `InputLayer`).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.InputLayer')
class InputLayer(base_layer.Layer):
"""Layer to be used as an entry point into a Network (a graph of layers).
  It can either wrap an existing tensor (pass an `input_tensor` argument)
  or create a placeholder tensor (pass arguments `input_shape`, and
  optionally, `dtype`).
  It is generally recommended to use the functional layer API via `Input`
  (which creates an `InputLayer`) without directly using `InputLayer`.
Arguments:
input_shape: Shape tuple (not including the batch axis), or `TensorShape`
instance (not including the batch axis).
batch_size: Optional input batch size (integer or None).
dtype: Datatype of the input.
input_tensor: Optional tensor to use as layer input
instead of creating a placeholder.
sparse: Boolean, whether the placeholder created
is meant to be sparse.
name: Name of the layer (string).
"""
def __init__(self,
input_shape=None,
batch_size=None,
dtype=None,
input_tensor=None,
sparse=False,
name=None,
**kwargs):
if 'batch_input_shape' in kwargs:
batch_input_shape = kwargs.pop('batch_input_shape')
if input_shape and batch_input_shape:
raise ValueError('Only provide the input_shape OR '
'batch_input_shape argument to '
'InputLayer, not both at the same time.')
batch_size = batch_input_shape[0]
input_shape = batch_input_shape[1:]
if kwargs:
raise ValueError('Unrecognized keyword arguments:', kwargs.keys())
if not name:
prefix = 'input'
name = prefix + '_' + str(K.get_uid(prefix))
if not dtype:
if input_tensor is None:
dtype = K.floatx()
else:
dtype = K.dtype(input_tensor)
super(InputLayer, self).__init__(dtype=dtype, name=name)
self.built = True
self.sparse = sparse
self.batch_size = batch_size
if isinstance(input_shape, tensor_shape.TensorShape):
input_shape = tuple(input_shape.as_list())
if input_tensor is None:
if input_shape is not None:
batch_input_shape = (batch_size,) + tuple(input_shape)
else:
batch_input_shape = None
if context.executing_eagerly():
# In eager mode, create a temporary placeholder to call the layer on.
input_tensor = base_layer.DeferredTensor( # pylint: disable=protected-access
shape=batch_input_shape,
dtype=dtype,
name=self.name)
else:
# In graph mode, create a graph placeholder to call the layer on.
if sparse:
input_tensor = array_ops.sparse_placeholder(
shape=batch_input_shape,
dtype=dtype,
name=self.name)
else:
input_tensor = array_ops.placeholder(
shape=batch_input_shape,
dtype=dtype,
name=self.name)
# For compatibility with Keras API.
self.is_placeholder = True
self._batch_input_shape = batch_input_shape
else:
# For compatibility with Keras API.
self.is_placeholder = False
self._batch_input_shape = tuple(input_tensor.get_shape().as_list())
# Create an input node to add to self.outbound_node
# and set output_tensors' _keras_history.
input_tensor._keras_history = (self, 0, 0) # pylint: disable=protected-access
base_layer.Node(
self,
inbound_layers=[],
node_indices=[],
tensor_indices=[],
input_tensors=[input_tensor],
output_tensors=[input_tensor])
def get_config(self):
config = {
'batch_input_shape': self._batch_input_shape,
'dtype': self.dtype,
'sparse': self.sparse,
'name': self.name
}
return config
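# A minimal sketch of direct InputLayer use inside a Sequential model (the
# functional `Input` below is generally preferred; `Sequential` and `Dense`
# are assumed tf.keras imports):
#
#     model = Sequential([
#         InputLayer(input_shape=(4,)),
#         Dense(8),
#     ])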
@tf_export('keras.layers.Input', 'keras.Input')
def Input( # pylint: disable=invalid-name
shape=None,
batch_size=None,
name=None,
dtype=None,
sparse=False,
tensor=None,
**kwargs):
"""`Input()` is used to instantiate a Keras tensor.
A Keras tensor is a tensor object from the underlying backend
(Theano or TensorFlow), which we augment with certain
attributes that allow us to build a Keras model
just by knowing the inputs and outputs of the model.
For instance, if a, b and c are Keras tensors,
it becomes possible to do:
`model = Model(input=[a, b], output=c)`
The added Keras attribute is:
`_keras_history`: Last layer applied to the tensor.
the entire layer graph is retrievable from that layer,
recursively.
Arguments:
shape: A shape tuple (integers), not including the batch size.
For instance, `shape=(32,)` indicates that the expected input
will be batches of 32-dimensional vectors.
batch_size: optional static batch size (integer).
name: An optional name string for the layer.
Should be unique in a model (do not reuse the same name twice).
It will be autogenerated if it isn't provided.
dtype: The data type expected by the input, as a string
(`float32`, `float64`, `int32`...)
sparse: A boolean specifying whether the placeholder
to be created is sparse.
tensor: Optional existing tensor to wrap into the `Input` layer.
If set, the layer will not create a placeholder tensor.
**kwargs: deprecated arguments support.
Returns:
A tensor.
Example:
```python
# this is a logistic regression in Keras
x = Input(shape=(32,))
y = Dense(16, activation='softmax')(x)
model = Model(x, y)
```
Raises:
ValueError: in case of invalid arguments.
"""
if 'batch_shape' in kwargs:
batch_shape = kwargs.pop('batch_shape')
if shape and batch_shape:
raise ValueError('Only provide the shape OR '
'batch_shape argument to '
'Input, not both at the same time.')
batch_size = batch_shape[0]
shape = batch_shape[1:]
if kwargs:
raise ValueError('Unrecognized keyword arguments:', kwargs.keys())
if dtype is None:
dtype = K.floatx()
if not shape and tensor is None:
raise ValueError('Please provide to Input either a `shape`'
' or a `tensor` argument. Note that '
'`shape` does not include the batch '
'dimension.')
input_layer = InputLayer(
input_shape=shape,
batch_size=batch_size,
name=name,
dtype=dtype,
sparse=sparse,
input_tensor=tensor)
# Return tensor including `_keras_history`.
# Note that in this case train_output and test_output are the same pointer.
outputs = input_layer._inbound_nodes[0].output_tensors
if len(outputs) == 1:
return outputs[0]
else:
return outputs
| {
"content_hash": "9ae2b0345bef47eff720854ceaa56994",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 85,
"avg_line_length": 34.27906976744186,
"alnum_prop": 0.6316146540027137,
"repo_name": "nburn42/tensorflow",
"id": "b04dc3c60be2a9e13cb1bc56ef12f0de36ed105c",
"size": "8094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/engine/input_layer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "341132"
},
{
"name": "C++",
"bytes": "39824558"
},
{
"name": "CMake",
"bytes": "194702"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "590137"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33704964"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "426212"
}
],
"symlink_target": ""
} |
import sys
from oslo.config import cfg
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.common import constants as q_const
from neutron.common import exceptions as q_exc
from neutron.common import rpc as q_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import extraroute_db
from neutron.db import l3_gwmode_db
from neutron.db import l3_rpc_base
from neutron.db import portbindings_db
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.openstack.common.rpc import proxy
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.openvswitch.common import config # noqa
from neutron.plugins.openvswitch.common import constants
from neutron.plugins.openvswitch import ovs_db_v2
LOG = logging.getLogger(__name__)
class OVSRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
l3_rpc_base.L3RpcCallbackMixin,
sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
# history
# 1.0 Initial version
# 1.1 Support Security Group RPC
RPC_API_VERSION = '1.1'
def __init__(self, notifier):
self.notifier = notifier
def create_rpc_dispatcher(self):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
return q_rpc.PluginRpcDispatcher([self,
agents_db.AgentExtRpcCallback()])
@classmethod
def get_port_from_device(cls, device):
port = ovs_db_v2.get_port_from_device(device)
if port:
port['device'] = device
return port
def get_device_details(self, rpc_context, **kwargs):
"""Agent requests device details."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port = ovs_db_v2.get_port(device)
if port:
binding = ovs_db_v2.get_network_binding(None, port['network_id'])
entry = {'device': device,
'network_id': port['network_id'],
'port_id': port['id'],
'admin_state_up': port['admin_state_up'],
'network_type': binding.network_type,
'segmentation_id': binding.segmentation_id,
'physical_network': binding.physical_network}
new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up']
else q_const.PORT_STATUS_DOWN)
if port['status'] != new_status:
ovs_db_v2.set_port_status(port['id'], new_status)
else:
entry = {'device': device}
LOG.debug(_("%s can not be found in database"), device)
return entry
def update_device_down(self, rpc_context, **kwargs):
"""Device no longer exists on agent."""
# TODO(garyk) - live migration and port status
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port = ovs_db_v2.get_port(device)
if port:
entry = {'device': device,
'exists': True}
if port['status'] != q_const.PORT_STATUS_DOWN:
# Set port status to DOWN
ovs_db_v2.set_port_status(port['id'], q_const.PORT_STATUS_DOWN)
else:
entry = {'device': device,
'exists': False}
LOG.debug(_("%s can not be found in database"), device)
return entry
def update_device_up(self, rpc_context, **kwargs):
"""Device is up on agent."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s up on %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port = ovs_db_v2.get_port(device)
if port:
if port['status'] != q_const.PORT_STATUS_ACTIVE:
ovs_db_v2.set_port_status(port['id'],
q_const.PORT_STATUS_ACTIVE)
else:
LOG.debug(_("%s can not be found in database"), device)
def tunnel_sync(self, rpc_context, **kwargs):
"""Update new tunnel.
        Updates the database with the tunnel IP. All listening agents will also
be notified about the new tunnel IP.
"""
tunnel_ip = kwargs.get('tunnel_ip')
# Update the database with the IP
tunnel = ovs_db_v2.add_tunnel_endpoint(tunnel_ip)
tunnels = ovs_db_v2.get_tunnel_endpoints()
entry = dict()
entry['tunnels'] = tunnels
# Notify all other listening agents
self.notifier.tunnel_update(rpc_context, tunnel.ip_address,
tunnel.id)
        # Return the list of tunnel IPs to the agent
return entry
class AgentNotifierApi(proxy.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
'''Agent side of the openvswitch rpc API.
API version history:
1.0 - Initial version.
'''
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_network_delete = topics.get_topic_name(topic,
topics.NETWORK,
topics.DELETE)
self.topic_port_update = topics.get_topic_name(topic,
topics.PORT,
topics.UPDATE)
self.topic_tunnel_update = topics.get_topic_name(topic,
constants.TUNNEL,
topics.UPDATE)
def network_delete(self, context, network_id):
self.fanout_cast(context,
self.make_msg('network_delete',
network_id=network_id),
topic=self.topic_network_delete)
def port_update(self, context, port, network_type, segmentation_id,
physical_network):
self.fanout_cast(context,
self.make_msg('port_update',
port=port,
network_type=network_type,
segmentation_id=segmentation_id,
physical_network=physical_network),
topic=self.topic_port_update)
def tunnel_update(self, context, tunnel_ip, tunnel_id):
self.fanout_cast(context,
self.make_msg('tunnel_update',
tunnel_ip=tunnel_ip,
tunnel_id=tunnel_id),
topic=self.topic_tunnel_update)
class OVSNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
extraroute_db.ExtraRoute_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
agentschedulers_db.AgentSchedulerDbMixin,
portbindings_db.PortBindingMixin):
"""Implement the Neutron abstractions using Open vSwitch.
Depending on whether tunneling is enabled, either a GRE, VXLAN tunnel or
a new VLAN is created for each network. An agent is relied upon to
perform the actual OVS configuration on each host.
The provider extension is also supported. As discussed in
https://bugs.launchpad.net/neutron/+bug/1023156, this class could
be simplified, and filtering on extended attributes could be
handled, by adding support for extended attributes to the
NeutronDbPluginV2 base class. When that occurs, this class should
be updated to take advantage of it.
    The port binding extension enables an external application to relay
    information to and from the plugin.
"""
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
_supported_extension_aliases = ["provider", "router", "ext-gw-mode",
"binding", "quotas", "security-group",
"agent", "extraroute", "agent_scheduler"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_if_noop_driver(aliases)
self._aliases = aliases
return self._aliases
def __init__(self, configfile=None):
self.extra_binding_dict = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
portbindings.CAPABILITIES: {
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}}
ovs_db_v2.initialize()
self._parse_network_vlan_ranges()
ovs_db_v2.sync_vlan_allocations(self.network_vlan_ranges)
self.tenant_network_type = cfg.CONF.OVS.tenant_network_type
if self.tenant_network_type not in [constants.TYPE_LOCAL,
constants.TYPE_VLAN,
constants.TYPE_GRE,
constants.TYPE_VXLAN,
constants.TYPE_NONE]:
LOG.error(_("Invalid tenant_network_type: %s. "
"Server terminated!"),
self.tenant_network_type)
sys.exit(1)
self.enable_tunneling = cfg.CONF.OVS.enable_tunneling
self.tunnel_id_ranges = []
if self.enable_tunneling:
self._parse_tunnel_id_ranges()
ovs_db_v2.sync_tunnel_allocations(self.tunnel_id_ranges)
elif self.tenant_network_type in constants.TUNNEL_NETWORK_TYPES:
LOG.error(_("Tunneling disabled but tenant_network_type is '%s'. "
"Server terminated!"), self.tenant_network_type)
sys.exit(1)
self.setup_rpc()
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
self.router_scheduler = importutils.import_object(
cfg.CONF.router_scheduler_driver
)
def setup_rpc(self):
# RPC support
self.topic = topics.PLUGIN
self.conn = rpc.create_connection(new=True)
self.notifier = AgentNotifierApi(topics.AGENT)
self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
self.l3_agent_notifier = l3_rpc_agent_api.L3AgentNotify
self.callbacks = OVSRpcCallbacks(self.notifier)
self.dispatcher = self.callbacks.create_rpc_dispatcher()
self.conn.create_consumer(self.topic, self.dispatcher,
fanout=False)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
def _parse_network_vlan_ranges(self):
try:
self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
cfg.CONF.OVS.network_vlan_ranges)
except Exception as ex:
LOG.error(_("%s. Server terminated!"), ex)
sys.exit(1)
LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)
def _parse_tunnel_id_ranges(self):
for entry in cfg.CONF.OVS.tunnel_id_ranges:
entry = entry.strip()
try:
tun_min, tun_max = entry.split(':')
self.tunnel_id_ranges.append((int(tun_min), int(tun_max)))
except ValueError as ex:
LOG.error(_("Invalid tunnel ID range: "
"'%(range)s' - %(e)s. Server terminated!"),
{'range': entry, 'e': ex})
sys.exit(1)
LOG.info(_("Tunnel ID ranges: %s"), self.tunnel_id_ranges)
def _extend_network_dict_provider(self, context, network):
binding = ovs_db_v2.get_network_binding(context.session,
network['id'])
network[provider.NETWORK_TYPE] = binding.network_type
if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
network[provider.PHYSICAL_NETWORK] = None
network[provider.SEGMENTATION_ID] = binding.segmentation_id
elif binding.network_type == constants.TYPE_FLAT:
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = None
elif binding.network_type == constants.TYPE_VLAN:
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = binding.segmentation_id
elif binding.network_type == constants.TYPE_LOCAL:
network[provider.PHYSICAL_NETWORK] = None
network[provider.SEGMENTATION_ID] = None
def _process_provider_create(self, context, attrs):
network_type = attrs.get(provider.NETWORK_TYPE)
physical_network = attrs.get(provider.PHYSICAL_NETWORK)
segmentation_id = attrs.get(provider.SEGMENTATION_ID)
network_type_set = attributes.is_attr_set(network_type)
physical_network_set = attributes.is_attr_set(physical_network)
segmentation_id_set = attributes.is_attr_set(segmentation_id)
if not (network_type_set or physical_network_set or
segmentation_id_set):
return (None, None, None)
if not network_type_set:
msg = _("provider:network_type required")
raise q_exc.InvalidInput(error_message=msg)
elif network_type == constants.TYPE_FLAT:
if segmentation_id_set:
msg = _("provider:segmentation_id specified for flat network")
raise q_exc.InvalidInput(error_message=msg)
else:
segmentation_id = constants.FLAT_VLAN_ID
elif network_type == constants.TYPE_VLAN:
if not segmentation_id_set:
msg = _("provider:segmentation_id required")
raise q_exc.InvalidInput(error_message=msg)
if not utils.is_valid_vlan_tag(segmentation_id):
msg = (_("provider:segmentation_id out of range "
"(%(min_id)s through %(max_id)s)") %
{'min_id': q_const.MIN_VLAN_TAG,
'max_id': q_const.MAX_VLAN_TAG})
raise q_exc.InvalidInput(error_message=msg)
elif network_type in constants.TUNNEL_NETWORK_TYPES:
if not self.enable_tunneling:
msg = _("%s networks are not enabled") % network_type
raise q_exc.InvalidInput(error_message=msg)
if physical_network_set:
msg = _("provider:physical_network specified for %s "
"network") % network_type
raise q_exc.InvalidInput(error_message=msg)
else:
physical_network = None
if not segmentation_id_set:
msg = _("provider:segmentation_id required")
raise q_exc.InvalidInput(error_message=msg)
elif network_type == constants.TYPE_LOCAL:
if physical_network_set:
msg = _("provider:physical_network specified for local "
"network")
raise q_exc.InvalidInput(error_message=msg)
else:
physical_network = None
if segmentation_id_set:
msg = _("provider:segmentation_id specified for local "
"network")
raise q_exc.InvalidInput(error_message=msg)
else:
segmentation_id = None
else:
msg = _("provider:network_type %s not supported") % network_type
raise q_exc.InvalidInput(error_message=msg)
if network_type in [constants.TYPE_VLAN, constants.TYPE_FLAT]:
if physical_network_set:
if physical_network not in self.network_vlan_ranges:
msg = _("Unknown provider:physical_network "
"%s") % physical_network
raise q_exc.InvalidInput(error_message=msg)
elif 'default' in self.network_vlan_ranges:
physical_network = 'default'
else:
msg = _("provider:physical_network required")
raise q_exc.InvalidInput(error_message=msg)
return (network_type, physical_network, segmentation_id)
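    # An illustrative attribute set that passes the validation above
    # (hypothetical values for a VLAN provider network):
    #
    #     {'provider:network_type': 'vlan',
    #      'provider:physical_network': 'default',
    #      'provider:segmentation_id': 1000}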
def create_network(self, context, network):
(network_type, physical_network,
segmentation_id) = self._process_provider_create(context,
network['network'])
session = context.session
#set up default security groups
tenant_id = self._get_tenant_id_for_create(
context, network['network'])
self._ensure_default_security_group(context, tenant_id)
with session.begin(subtransactions=True):
if not network_type:
# tenant network
network_type = self.tenant_network_type
if network_type == constants.TYPE_NONE:
raise q_exc.TenantNetworksDisabled()
elif network_type == constants.TYPE_VLAN:
(physical_network,
segmentation_id) = ovs_db_v2.reserve_vlan(session)
elif network_type in constants.TUNNEL_NETWORK_TYPES:
segmentation_id = ovs_db_v2.reserve_tunnel(session)
# no reservation needed for TYPE_LOCAL
else:
# provider network
if network_type in [constants.TYPE_VLAN, constants.TYPE_FLAT]:
ovs_db_v2.reserve_specific_vlan(session, physical_network,
segmentation_id)
elif network_type in constants.TUNNEL_NETWORK_TYPES:
ovs_db_v2.reserve_specific_tunnel(session, segmentation_id)
# no reservation needed for TYPE_LOCAL
net = super(OVSNeutronPluginV2, self).create_network(context,
network)
ovs_db_v2.add_network_binding(session, net['id'], network_type,
physical_network, segmentation_id)
self._process_l3_create(context, net, network['network'])
self._extend_network_dict_provider(context, net)
# note - exception will rollback entire transaction
LOG.debug(_("Created network: %s"), net['id'])
return net
def update_network(self, context, id, network):
provider._raise_if_updates_provider_attributes(network['network'])
session = context.session
with session.begin(subtransactions=True):
net = super(OVSNeutronPluginV2, self).update_network(context, id,
network)
self._process_l3_update(context, net, network['network'])
self._extend_network_dict_provider(context, net)
return net
def delete_network(self, context, id):
session = context.session
with session.begin(subtransactions=True):
binding = ovs_db_v2.get_network_binding(session, id)
super(OVSNeutronPluginV2, self).delete_network(context, id)
if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
ovs_db_v2.release_tunnel(session, binding.segmentation_id,
self.tunnel_id_ranges)
elif binding.network_type in [constants.TYPE_VLAN,
constants.TYPE_FLAT]:
ovs_db_v2.release_vlan(session, binding.physical_network,
binding.segmentation_id,
self.network_vlan_ranges)
# the network_binding record is deleted via cascade from
# the network record, so explicit removal is not necessary
self.notifier.network_delete(context, id)
def get_network(self, context, id, fields=None):
session = context.session
with session.begin(subtransactions=True):
net = super(OVSNeutronPluginV2, self).get_network(context,
id, None)
self._extend_network_dict_provider(context, net)
return self._fields(net, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None,
limit=None, marker=None, page_reverse=False):
session = context.session
with session.begin(subtransactions=True):
nets = super(OVSNeutronPluginV2,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
for net in nets:
self._extend_network_dict_provider(context, net)
return [self._fields(net, fields) for net in nets]
def create_port(self, context, port):
# Set port status as 'DOWN'. This will be updated by agent
port['port']['status'] = q_const.PORT_STATUS_DOWN
port_data = port['port']
session = context.session
with session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
port = super(OVSNeutronPluginV2, self).create_port(context, port)
self._process_portbindings_create_and_update(context,
port_data, port)
self._process_port_create_security_group(context, port, sgids)
self.notify_security_groups_member_updated(context, port)
return port
def update_port(self, context, id, port):
session = context.session
need_port_update_notify = False
with session.begin(subtransactions=True):
original_port = super(OVSNeutronPluginV2, self).get_port(
context, id)
updated_port = super(OVSNeutronPluginV2, self).update_port(
context, id, port)
need_port_update_notify = self.update_security_group_on_port(
context, id, port, original_port, updated_port)
self._process_portbindings_create_and_update(context,
port['port'],
updated_port)
need_port_update_notify |= self.is_security_group_member_updated(
context, original_port, updated_port)
if original_port['admin_state_up'] != updated_port['admin_state_up']:
need_port_update_notify = True
if need_port_update_notify:
binding = ovs_db_v2.get_network_binding(None,
updated_port['network_id'])
self.notifier.port_update(context, updated_port,
binding.network_type,
binding.segmentation_id,
binding.physical_network)
return updated_port
def delete_port(self, context, id, l3_port_check=True):
        # if needed, check to see if this is a port owned by
        # an l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
session = context.session
with session.begin(subtransactions=True):
self.disassociate_floatingips(context, id)
port = self.get_port(context, id)
self._delete_port_security_group_bindings(context, id)
super(OVSNeutronPluginV2, self).delete_port(context, id)
self.notify_security_groups_member_updated(context, port)
| {
"content_hash": "a24cee52afbad9fd82d5e71b28232bf3",
"timestamp": "",
"source": "github",
"line_count": 549,
"max_line_length": 79,
"avg_line_length": 46.26047358834244,
"alnum_prop": 0.5712091979367642,
"repo_name": "CiscoSystems/quantum",
"id": "b98892966d746d102748ffe7a5d842ddc61e43c7",
"size": "26411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/plugins/openvswitch/ovs_neutron_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "4563108"
},
{
"name": "Shell",
"bytes": "9109"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
import unittest
from tests.FileHandleContainerTestBase import FileHandleContainerTestBase
from splitviewfuse.filehandlecontainers.VirtualFileSegmentFileHandleContainer import VirtualFileSegmentFileHandleContainer
class TestVirtualFileSegmentFileHandleContainer(FileHandleContainerTestBase, unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestVirtualFileSegmentFileHandleContainer, self).__init__(*args, **kwargs)
def _FileHandleContainerTestBase__createTestSubject(self):
return VirtualFileSegmentFileHandleContainer(16)
def _FileHandleContainerTestBase__getFileForRegularTests(self):
return self.tmpDir.regularfile2
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | {
"content_hash": "56c36f77ef96a25e8e9575a98cc64823",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 122,
"avg_line_length": 38.7,
"alnum_prop": 0.7803617571059431,
"repo_name": "seiferma/splitviewfuse",
"id": "437a49b2ad80b7a03aec49bcc55f6b4c6172475c",
"size": "774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/TestVirtualFileSegmentFileHandleContainer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63881"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import warnings
from os.path import basename
from collections import OrderedDict
from glue.core.coordinates import coordinates_from_header, WCSCoordinates
from glue.core.data import Component, Data
from glue.config import data_factory, qglue_parser
__all__ = ['is_fits', 'fits_reader', 'is_casalike', 'casalike_cube']
def is_fits(filename):
from astropy.io import fits
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
with fits.open(filename, ignore_missing_end=True):
return True
except IOError:
return False
@data_factory(
label='FITS file',
identifier=is_fits,
priority=100,
)
def fits_reader(source, auto_merge=False, exclude_exts=None, label=None):
"""
Read in all extensions from a FITS file.
Parameters
----------
source: str or HDUList
The pathname to the FITS file.
If an HDUList is passed in, simply use that.
    auto_merge: bool
        Merge extensions that have the same shape
        when only one of them has a defined WCS.
exclude_exts: [hdu, ] or [index, ]
List of HDU's to exclude from reading.
This can be a list of HDU's or a list
of HDU indexes.
"""
from astropy.io import fits
from astropy.table import Table
exclude_exts = exclude_exts or []
if isinstance(source, fits.hdu.hdulist.HDUList):
hdulist = source
close_hdulist = False
else:
hdulist = fits.open(source, ignore_missing_end=True)
hdulist.verify('fix')
close_hdulist = True
groups = OrderedDict()
extension_by_shape = OrderedDict()
if label is not None:
label_base = label
else:
hdulist_name = hdulist.filename()
if hdulist_name is None:
hdulist_name = "HDUList"
label_base = basename(hdulist_name).rpartition('.')[0]
if not label_base:
label_base = basename(hdulist_name)
# Create a new image Data.
def new_data():
label = '{0}[{1}]'.format(
label_base,
hdu_name
)
data = Data(label=label)
data.coords = coords
groups[hdu_name] = data
extension_by_shape[shape] = hdu_name
return data
for extnum, hdu in enumerate(hdulist):
hdu_name = hdu.name if hdu.name else "HDU{0}".format(extnum)
if (hdu.data is not None and
hdu.data.size > 0 and
hdu_name not in exclude_exts and
extnum not in exclude_exts):
if is_image_hdu(hdu):
shape = hdu.data.shape
coords = coordinates_from_header(hdu.header)
if not auto_merge or has_wcs(coords):
data = new_data()
else:
try:
data = groups[extension_by_shape[shape]]
except KeyError:
data = new_data()
data.add_component(component=hdu.data,
label=hdu_name)
elif is_table_hdu(hdu):
# Loop through columns and make component list
table = Table.read(hdu, format='fits')
label = '{0}[{1}]'.format(label_base, hdu_name)
data = Data(label=label)
groups[hdu_name] = data
for column_name in table.columns:
column = table[column_name]
if column.ndim != 1:
warnings.warn("Dropping column '{0}' since it is not 1-dimensional".format(column_name))
continue
component = Component.autotyped(column, units=column.unit)
data.add_component(component=component,
label=column_name)
if close_hdulist:
hdulist.close()
return [groups[idx] for idx in groups]
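# A hedged usage sketch (hypothetical file name):
#
#     datasets = fits_reader('observation.fits', auto_merge=True)
#     for d in datasets:
#         print(d.label)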
# Utilities
def is_image_hdu(hdu):
from astropy.io.fits.hdu import PrimaryHDU, ImageHDU, CompImageHDU
return isinstance(hdu, (PrimaryHDU, ImageHDU, CompImageHDU))
def is_table_hdu(hdu):
from astropy.io.fits.hdu import TableHDU, BinTableHDU
return isinstance(hdu, (TableHDU, BinTableHDU))
def has_wcs(coords):
return (isinstance(coords, WCSCoordinates) and
any(axis['coordinate_type'] is not None
for axis in coords.wcs.get_axis_types()))
def is_casalike(filename, **kwargs):
"""
Check if a FITS file is a CASA like cube,
with (P, P, V, Stokes) layout
"""
from astropy.io import fits
if not is_fits(filename):
return False
with fits.open(filename, ignore_missing_end=True) as hdulist:
if len(hdulist) != 1:
return False
if hdulist[0].header['NAXIS'] != 4:
return False
from astropy.wcs import WCS
w = WCS(hdulist[0].header)
ax = [a.get('coordinate_type') for a in w.get_axis_types()]
return ax == ['celestial', 'celestial', 'spectral', 'stokes']
@data_factory(label='CASA PPV Cube', identifier=is_casalike, deprecated=True)
def casalike_cube(filename, **kwargs):
"""
This provides special support for 4D CASA FITS - like cubes,
which have 2 spatial axes, a spectral axis, and a stokes axis
in that order.
Each stokes cube is split out as a separate component
"""
from astropy.io import fits
result = Data()
if 'ignore_missing_end' not in kwargs:
kwargs['ignore_missing_end'] = True
with fits.open(filename, **kwargs) as hdulist:
array = hdulist[0].data
header = hdulist[0].header
result.coords = coordinates_from_header(header)
for i in range(array.shape[0]):
result.add_component(array[[i]], label='STOKES %i' % i)
return result
try:
from astropy.io.fits import HDUList
except ImportError:
pass
else:
# Put HDUList parser before list parser
@qglue_parser(HDUList, priority=100)
def _parse_data_hdulist(data, label):
from glue.core.data_factories.fits import fits_reader
return fits_reader(data, label=label)
| {
"content_hash": "653f1b812b8a0887119bc3fe5c35de32",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 112,
"avg_line_length": 30.028985507246375,
"alnum_prop": 0.5928249678249679,
"repo_name": "stscieisenhamer/glue",
"id": "53e3a44fd6a80b952f4413d6dbb8183ca5b322cf",
"size": "6216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glue/core/data_factories/fits.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1591083"
},
{
"name": "Shell",
"bytes": "1603"
}
],
"symlink_target": ""
} |
"""Tests for DancingCatch."""
from absl.testing import absltest
from absl.testing import parameterized
from csuite.environments import dancing_catch
class DancingCatchTest(parameterized.TestCase):
def test_environment_setup(self):
"""Tests environment initialization."""
env = dancing_catch.DancingCatch()
self.assertIsNotNone(env)
def test_start(self):
"""Tests environment start."""
env = dancing_catch.DancingCatch()
params = env.get_config()
with self.subTest(name='step_without_start'):
# Calling step before start should raise an error.
with self.assertRaises(RuntimeError):
env.step(dancing_catch.Action.LEFT)
with self.subTest(name='start_state'):
start_obs = env.start()
state = env.get_state()
# Paddle should be positioned at the bottom of the board.
self.assertEqual(state.paddle_y, params.rows - 1)
paddle_idx = state.paddle_y * params.columns + state.paddle_x
self.assertEqual(start_obs[paddle_idx], 1)
# First ball should be positioned at the top of the board.
ball_x = state.balls[0][0]
ball_y = state.balls[0][1]
self.assertEqual(ball_y, 0)
ball_idx = ball_y * params.columns + ball_x
self.assertEqual(start_obs[ball_idx], 1)
def test_invalid_state(self):
"""Tests setting environment state with invalid fields."""
env = dancing_catch.DancingCatch()
env.start()
with self.subTest(name='paddle_out_of_range'):
new_state = env.get_state()
new_state.paddle_x = 5
with self.assertRaises(ValueError):
env.set_state(new_state)
with self.subTest(name='balls_out_of_range'):
new_state = env.get_state()
new_state.balls = [(0, -1)]
with self.assertRaises(ValueError):
env.set_state(new_state)
@parameterized.parameters((0, 0, 1), (2, 1, 3), (4, 3, 4))
def test_one_step(self, paddle_x, expected_left_x, expected_right_x):
"""Tests one environment step given the x-position of the paddle."""
env = dancing_catch.DancingCatch()
env.start()
with self.subTest(name='invalid_action'):
with self.assertRaises(ValueError):
env.step(3)
with self.subTest(name='move_left_step'):
current_state = env.get_state()
current_state.paddle_x = paddle_x
env.set_state(current_state)
env.step(dancing_catch.Action.LEFT)
state = env.get_state()
# Paddle x-position should have moved left by 1 unless at the edge.
self.assertEqual(state.paddle_x, expected_left_x)
with self.subTest(name='move_right_step'):
current_state = env.get_state()
current_state.paddle_x = paddle_x
env.set_state(current_state)
env.step(dancing_catch.Action.RIGHT)
state = env.get_state()
# Paddle x-position should have moved right by 1 unless at the edge.
self.assertEqual(state.paddle_x, expected_right_x)
with self.subTest(name='stay_step'):
current_state = env.get_state()
current_state.paddle_x = paddle_x
env.set_state(current_state)
env.step(dancing_catch.Action.STAY)
state = env.get_state()
self.assertEqual(state.paddle_x, paddle_x)
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "86f646311f6a0e06e392ecd784b1b68f",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 74,
"avg_line_length": 32.41,
"alnum_prop": 0.6590558469608145,
"repo_name": "deepmind/csuite",
"id": "89cc24fa789fcd53fba335756f283f71c94033b8",
"size": "3938",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "csuite/environments/dancing_catch_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "125730"
}
],
"symlink_target": ""
} |
import os
import keras
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from lang_config import *
def count_chars(text):
"""
Counts number of chars in text based on given alphabet.
"""
alphabet_counts = [0] * len(alphabet)
for char in text:
index = char_index.get(char, -1)
if index >= 0:
alphabet_counts[index] += 1
return alphabet_counts
def get_sample(file_content, start_index, sample_size):
"""
Returns a sample of text from `file_content` of length no more than `sample_size`,
starting at `start_index` and preserving full words.
"""
# we want to start from full first word
# if the first character is not space, move to next ones
while not file_content[start_index].isspace():
start_index += 1
# now we look for first non-space character - beginning of any word
while file_content[start_index].isspace():
start_index += 1
end_index = min(len(file_content) - 1, start_index + sample_size)
# we also want full words at the end
while not file_content[end_index].isspace():
end_index -= 1
return file_content[start_index:end_index]
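# Worked example: get_sample("the quick brown fox", 0, 9) returns "quick" --
# the possibly partial leading word is skipped and the trailing partial word
# is trimmed so that only whole words remain.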
def build_input_vector(sample_text):
"""
Creates an input vector for the NN from the provided sample.
Currently it is the vector of letter counts.
"""
return count_chars(sample_text.lower())
vector_size = len(build_input_vector(""))
def create_sample_vectors(cleaned_data_directory, out_vectors_path):
"""
Generates input vectors for the NN.
"""
vectors = []
for filename in os.listdir(cleaned_data_directory):
if not filename.endswith(".txt"):
continue
path = os.path.join(cleaned_data_directory, filename)
f = open(path, mode='r', encoding='utf8')
print("Processing", path)
lang = filename[:2]
lang_number = language_codes.index(lang)
print(f"\tLanguage: {lang} ({lang_number})")
print("\tReading...", end=' ')
file_content = f.read()
content_length = len(file_content)
print("done.")
print("\tExtracting vectors...", end=' ')
sample_start_index = 0
count = 0
while sample_start_index + text_sample_size < content_length:
sample = get_sample(file_content, sample_start_index, text_sample_size)
input_vector = build_input_vector(sample)
vector = input_vector + [lang_number]
vectors.append(vector)
sample_start_index += text_sample_size
count += 1
print("done.")
print(f"\tExtracted {count} vectors.")
del file_content
print(f"Total {len(vectors)} vectors.")
np_vectors = np.array(vectors, dtype=np.uint16)
np.random.shuffle(np_vectors)
print(f"Converted to NumPy array, shape: {np_vectors.shape}.")
np.savez_compressed(out_vectors_path, data=np_vectors)
print(f"Saved to {out_vectors_path}.")
def size_kb(path):
"""
Returns file size in KB.
"""
size = os.path.getsize(path)
return '{:.2f}'.format(size / 1000.0)
def gen_train_test(vectors_path, out_train_test_path):
"""
Generates train/test data with preprocessing.
"""
data = np.load(vectors_path)['data']
x = data[:, 0:vector_size]
y = data[:, vector_size]
del data
# x preprocessing
standard_scaler = preprocessing.StandardScaler().fit(x)
x = standard_scaler.transform(x)
# Convert y to binary class matrix (for categorical_crossentropy)
y = keras.utils.to_categorical(y, num_classes=len(language_codes))
# Static seed to have comparable results for different runs
seed = 42
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20, random_state=seed)
del x, y
np.savez_compressed(out_train_test_path, X_train=x_train, y_train=y_train, X_test=x_test, y_test=y_test)
print(f"Saved train/test data to {out_train_test_path}, size: {size_kb(out_train_test_path)} KB.")
del x_train, y_train, x_test, y_test
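# Typical end-to-end use of this module (illustrative paths):
#
#     create_sample_vectors('data/cleaned', 'vectors.npz')
#     gen_train_test('vectors.npz', 'train_test.npz')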
| {
"content_hash": "6641351224f17d916cba4b1be6a70f87",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 108,
"avg_line_length": 27.70469798657718,
"alnum_prop": 0.6327519379844961,
"repo_name": "MaKToff/SPbSU_Homeworks",
"id": "0ad03df7e2ff6637eedee1d23642f1a7f6d75cca",
"size": "4128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Semester 7/Natural language processing/preprocessing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "48341"
},
{
"name": "F#",
"bytes": "131089"
},
{
"name": "Haskell",
"bytes": "15908"
},
{
"name": "Python",
"bytes": "41993"
}
],
"symlink_target": ""
} |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.stanza import Error
from sleekxmpp.stanza.rootstanza import RootStanza
from sleekxmpp.xmlstream import StanzaBase, ET
class Message(RootStanza):
"""
XMPP's <message> stanzas are a "push" mechanism to send information
to other XMPP entities without requiring a response.
Chat clients will typically use <message> stanzas that have a type
of either "chat" or "groupchat".
When handling a message event, be sure to check if the message is
an error response.
Example <message> stanzas:
<message to="user1@example.com" from="user2@example.com">
<body>Hi!</body>
</message>
<message type="groupchat" to="room@conference.example.com">
<body>Hi everyone!</body>
</message>
Stanza Interface:
body -- The main contents of the message.
subject -- An optional description of the message's contents.
mucroom -- (Read-only) The name of the MUC room that sent the message.
mucnick -- (Read-only) The MUC nickname of message's sender.
Attributes:
types -- May be one of: normal, chat, headline, groupchat, or error.
Methods:
setup -- Overrides StanzaBase.setup.
chat -- Set the message type to 'chat'.
normal -- Set the message type to 'normal'.
reply -- Overrides StanzaBase.reply
get_type -- Overrides StanzaBase interface
get_mucroom -- Return the name of the MUC room of the message.
set_mucroom -- Dummy method to prevent assignment.
del_mucroom -- Dummy method to prevent deletion.
get_mucnick -- Return the MUC nickname of the message's sender.
set_mucnick -- Dummy method to prevent assignment.
del_mucnick -- Dummy method to prevent deletion.
"""
namespace = 'jabber:client'
name = 'message'
interfaces = set(('type', 'to', 'from', 'id', 'body', 'subject',
'mucroom', 'mucnick'))
sub_interfaces = set(('body', 'subject'))
plugin_attrib = name
types = set((None, 'normal', 'chat', 'headline', 'error', 'groupchat'))
def get_type(self):
"""
Return the message type.
Overrides default stanza interface behavior.
Returns 'normal' if no type attribute is present.
"""
return self._get_attr('type', 'normal')
def chat(self):
"""Set the message type to 'chat'."""
self['type'] = 'chat'
return self
def normal(self):
"""Set the message type to 'normal'."""
self['type'] = 'normal'
return self
def reply(self, body=None, clear=True):
"""
Create a message reply.
Overrides StanzaBase.reply.
Sets proper 'to' attribute if the message is from a MUC, and
adds a message body if one is given.
Arguments:
body -- Optional text content for the message.
clear -- Indicates if existing content should be removed
before replying. Defaults to True.
"""
StanzaBase.reply(self, clear)
if self['type'] == 'groupchat':
self['to'] = self['to'].bare
del self['id']
if body is not None:
self['body'] = body
return self
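    # A hedged usage sketch (hypothetical incoming groupchat message `msg`;
    # send() is inherited from StanzaBase):
    #
    #     msg.reply("Got it!").send()  # 'to' becomes the room's bare JID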
def get_mucroom(self):
"""
Return the name of the MUC room where the message originated.
Read-only stanza interface.
"""
if self['type'] == 'groupchat':
return self['from'].bare
else:
return ''
def get_mucnick(self):
"""
Return the nickname of the MUC user that sent the message.
Read-only stanza interface.
"""
if self['type'] == 'groupchat':
return self['from'].resource
else:
return ''
def set_mucroom(self, value):
"""Dummy method to prevent modification."""
pass
def del_mucroom(self):
"""Dummy method to prevent deletion."""
pass
def set_mucnick(self, value):
"""Dummy method to prevent modification."""
pass
def del_mucnick(self):
"""Dummy method to prevent deletion."""
pass
# To comply with PEP8, method names now use underscores.
# Deprecated method names are re-mapped for backwards compatibility.
Message.getType = Message.get_type
Message.getMucroom = Message.get_mucroom
Message.setMucroom = Message.set_mucroom
Message.delMucroom = Message.del_mucroom
Message.getMucnick = Message.get_mucnick
Message.setMucnick = Message.set_mucnick
Message.delMucnick = Message.del_mucnick
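# A minimal usage sketch (not part of the original module; builds a stanza
# standalone, without a connected XMPP stream):
def _example_message_usage():
    """Build a chat message and inspect its computed type."""
    msg = Message()
    assert msg.get_type() == 'normal'  # defaults to 'normal' when unset
    msg.chat()                         # sets type='chat' and returns the stanza
    assert msg['type'] == 'chat'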
| {
"content_hash": "fa6e60f1075c6e629036f540faa6b48d",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 78,
"avg_line_length": 30.64968152866242,
"alnum_prop": 0.6084788029925187,
"repo_name": "Petraea/jsonbot",
"id": "19d4d9e27c1353ce81b69affa35c68fd323683ac",
"size": "4812",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jsb/contrib/sleekxmpp/stanza/message.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36140"
},
{
"name": "JavaScript",
"bytes": "42430"
},
{
"name": "Python",
"bytes": "3234788"
},
{
"name": "Shell",
"bytes": "1874"
}
],
"symlink_target": ""
} |
import unittest
import random
import sys
from types import IntType, DictType, ListType, StringType, FloatType
# import classes
import analytics.utils.misc as misc
import analytics.exceptions.exceptions as ex
from analytics.core.dataitem import DataItem
from analytics.core.cluster import Cluster
from analytics.core.element import Element
import analytics.algorithms.rank as rank
from analytics.core.attribute.feature import Feature
from analytics.core.attribute.dynamic import Dynamic
from analytics.core.pulse import Pulse, StaticPulse, DynamicPulse
# some general input to test
general_input = [
None, True, False, sys.maxint, -sys.maxint-1, {}, [],
{"1": 1, "2": 2}, [1, 2, 3, 4, 5], "abc", 0, 1, -1, 1.233,
-3.343, 0.23435, " string ", " test test test ", "1"
]
class DataItem_TestSequence(unittest.TestCase):
def setUp(self):
self._iterations = 20
self._teststr = "test string"
def test_dataitem_init(self):
for it in range(self._iterations):
# generate random attributes
name = random.choice(general_input)
desc = random.choice(general_input)
rawseed = random.choice(general_input)
rawseed = rawseed if rawseed is not None else self._teststr
seed = str(rawseed).strip() if rawseed is not None else None
# create and test data item
d = DataItem(name, desc, rawseed)
id = misc.generateId(seed)
self.assertEqual(d._id, id)
self.assertEqual(d._name, str(name).strip())
self.assertEqual(d._desc, str(desc).strip())
def test_dataitem_id(self):
for it in range(self._iterations):
rawseed = random.choice(general_input)
rawseed = rawseed if rawseed is not None else self._teststr
d = DataItem(self._teststr, self._teststr, rawseed)
seed = str(rawseed).strip() if rawseed is not None else None
id = misc.generateId(seed)
self.assertEqual(d.id(), id)
def test_dataitem_name(self):
for it in range(self._iterations):
name = random.choice(general_input)
d = DataItem(name, self._teststr, None)
self.assertEqual(d.name(), str(name).strip())
def test_dataitem_desc(self):
for it in range(self._iterations):
desc = random.choice(general_input)
d = DataItem(self._teststr, desc, None)
self.assertEqual(d.desc(), str(desc).strip())
def test_dataitem_getJSON(self):
for it in range(self._iterations):
# generate random attributes
name = random.choice(general_input)
desc = random.choice(general_input)
rawseed = random.choice(general_input)
rawseed = rawseed if rawseed is not None else self._teststr
seed = str(rawseed).strip() if rawseed is not None else None
# create and test data item json object
d = DataItem(name, desc, seed)
obj = d.getJSON()
id = misc.generateId(seed)
self.assertEqual(type(obj), DictType)
self.assertEqual(obj["id"], id)
self.assertEqual(obj["name"], str(name).strip())
self.assertEqual(obj["desc"], str(desc).strip())
class Cluster_TestSequence(unittest.TestCase):
def setUp(self):
self._iterations = 20
self._teststr = "test string"
self._parentSeq = [
None,
Cluster(None, self._teststr, self._teststr),
DataItem(self._teststr, self._teststr)
]
def test_cluster_init(self):
# test iterations
for it in range(self._iterations):
for parent in self._parentSeq:
# generate random attributes
id = str(random.choice(general_input)).strip()
name = random.choice(general_input)
desc = random.choice(general_input)
seed = random.choice(general_input)
# create and test cluster
if parent is not None and type(parent) is not Cluster:
with self.assertRaises(ex.AnalyticsCheckError):
d = Cluster(id, name, desc, parent)
else:
d = Cluster(id, name, desc, parent)
self.assertEqual(d._id, misc.generateId(id))
self.assertEqual(d._name, str(name).strip())
self.assertEqual(d._desc, str(desc).strip())
self.assertEqual(d._parent, parent)
self.assertEqual(d._children, {})
def test_cluster_initUniqueId(self):
for it in range(self._iterations):
# as random input can be None, id will be random
id = str(random.choice(general_input)).strip()
name = random.choice(general_input)
desc = random.choice(general_input)
cl1 = Cluster(id, name, desc)
cl2 = Cluster(id, name, desc)
# check that they have the same ids
self.assertEqual(cl1.id(), cl2.id())
def test_cluster_parent(self):
for parent in self._parentSeq:
if parent is None or type(parent) is Cluster:
d = Cluster(self._teststr, self._teststr, self._teststr, parent)
self.assertEqual(d.parent(), parent)
else:
with self.assertRaises(ex.AnalyticsCheckError):
d = Cluster(
self._teststr,
self._teststr,
self._teststr,
parent
)
def test_cluster_children(self):
# quite simple test
d = Cluster(self._teststr, self._teststr, self._teststr)
self.assertEqual(type(d.children()), ListType)
self.assertEqual(d.children(), [])
def test_cluster_isLeaf(self):
# quite simple test
d = Cluster(self._teststr, self._teststr, self._teststr)
self.assertEqual(d.isLeaf(), True)
def test_cluster_setParent(self):
for parent in self._parentSeq:
d = Cluster(self._teststr, self._teststr, self._teststr)
self.assertEqual(d.parent(), None)
if parent is None or type(parent) is Cluster:
d.setParent(parent)
self.assertEqual(d.parent(), parent)
else:
with self.assertRaises(ex.AnalyticsCheckError):
d.setParent(parent)
def test_cluster_makeLeaf(self):
# create cluster
d = Cluster(self._teststr, self._teststr, self._teststr)
# check leaf
self.assertEqual(d.isLeaf(), True)
# assign some info
d._children["test"] = "string"
self.assertEqual(d.isLeaf(), False)
# make it leaf
d.makeLeaf()
self.assertEqual(d.isLeaf(), True)
self.assertEqual(d._children, {})
def test_cluster_addChild(self):
# create parent cluster and child cluster
parent = Cluster(self._teststr, self._teststr, self._teststr)
for it in range(self._iterations):
name = random.choice(general_input)
desc = random.choice(general_input)
children = [
None,
parent,
Cluster(None, name, desc),
DataItem(name, desc)
]
for child in children:
if type(child) is not Cluster:
with self.assertRaises(ex.AnalyticsCheckError):
parent.addChild(child)
elif child.id() == parent.id():
with self.assertRaises(ex.AnalyticsStandardError):
parent.addChild(child)
else:
parent.addChild(child)
        # expected number of children: exactly one valid Cluster child
        # is added per iteration
        num = self._iterations
self.assertEqual(len(parent.children()), num)
def test_cluster_removeChild(self):
parent = Cluster(self._teststr, self._teststr, self._teststr)
for it in range(self._iterations):
name = random.choice(general_input)
desc = random.choice(general_input)
# prepare children
children = [
None,
parent,
Cluster(None, name, desc),
DataItem(name, desc)
]
for child in children:
# check type of the child
if type(child) is not Cluster:
with self.assertRaises(ex.AnalyticsCheckError):
parent.addChild(child)
elif child.id() == parent.id():
with self.assertRaises(ex.AnalyticsStandardError):
parent.addChild(child)
else:
parent.addChild(child)
parent.removeChild(child)
self.assertEqual(parent.children(), [])
self.assertEqual(parent.children(), [])
def test_cluster_getJSON(self):
for it in range(self._iterations):
id = random.choice(general_input)
name = random.choice(general_input)
desc = random.choice(general_input)
parents = [None, Cluster(id, name, desc)]
for parent in parents:
cl = Cluster(
self._teststr,
self._teststr,
self._teststr,
parent
)
obj = cl.getJSON()
pid = None if parent is None else parent.id()
self.assertEqual(obj["id"], cl.id())
self.assertEqual(obj["name"], cl.name())
self.assertEqual(obj["desc"], cl.desc())
self.assertEqual(obj["parent"], pid)
# has to be equal to empty array for this case
self.assertEqual(obj["children"], [])
class Element_TestSequence(unittest.TestCase):
def setUp(self):
self._iterations = 20
self._teststr = "test string"
self._clusters = [
None,
Cluster(self._teststr, self._teststr, self._teststr),
DataItem(self._teststr, self._teststr),
sys.maxint,
-sys.maxint-1
]
self._ranks = [
None,
rank.RSYS.UND_RANK,
sys.maxint,
-sys.maxint-1
]
self._features = [
None,
sys.maxint,
-sys.maxint-1,
Feature(self._teststr, self._teststr, self._teststr)
]
def test_element_init(self):
for it in range(self._iterations):
id = str(random.choice(general_input)).strip()
name = random.choice(general_input)
desc = random.choice(general_input)
cluster = random.choice(self._clusters)
r = random.choice(self._ranks)
if cluster is not None and type(cluster) is not Cluster:
with self.assertRaises(ex.AnalyticsCheckError):
el = Element(id, name, desc, cluster, r)
elif type(r) is not rank.Rank:
with self.assertRaises(ex.AnalyticsCheckError):
el = Element(id, name, desc, cluster, r)
else:
el = Element(id, name, desc, cluster, r)
self.assertEqual(el.id(), misc.generateId(id))
self.assertEqual(el.name(), str(name).strip())
self.assertEqual(el.desc(), str(desc).strip())
self.assertEqual(el._cluster, cluster)
self.assertEqual(el._rank, r)
self.assertEqual(el._features, {})
def test_element_cluster(self):
for cluster in self._clusters:
if cluster is None or type(cluster) is Cluster:
el = Element(None, self._teststr, self._teststr, cluster)
self.assertEqual(el.cluster(), cluster)
else:
with self.assertRaises(ex.AnalyticsCheckError):
el = Element(None, self._teststr, self._teststr, cluster)
def test_element_rank(self):
for r in self._ranks:
if type(r) is rank.Rank:
el = Element(None, self._teststr, self._teststr, None, r)
self.assertEqual(el.rank(), r)
else:
with self.assertRaises(ex.AnalyticsCheckError):
el = Element(None, self._teststr, self._teststr, None, r)
def test_element_features(self):
el = Element(None, self._teststr, self._teststr)
self.assertEqual(el.features(), [])
def test_element_addFeature(self):
el = Element(None, self._teststr, self._teststr)
for feature in self._features:
if type(feature) is Feature:
el.addFeature(feature)
self.assertEqual(el.features(), [feature])
else:
with self.assertRaises(ex.AnalyticsCheckError):
el.addFeature(feature)
def test_element_addFeatures(self):
el = Element(None, self._teststr, self._teststr)
with self.assertRaises(ex.AnalyticsCheckError):
el.addFeatures(self._features)
# create new list of features
features = [
Feature("#1", self._teststr, self._teststr),
Feature("#2", self._teststr, self._teststr),
]
el.addFeatures(features)
self.assertEqual(len(el.features()), len(features))
# sorted list of features ids
featuresIds = sorted([a.id() for a in features])
self.assertEqual(sorted(el._features.keys()), featuresIds)
def test_element_getJSON(self):
        # initialise clusters and ranks
clusters = [None, Cluster(None, self._teststr, self._teststr)]
ranks = [None, rank.RSYS.UND_RANK]
for it in range(self._iterations):
clr = random.choice(clusters)
rnk = random.choice(ranks)
if rnk is None:
with self.assertRaises(ex.AnalyticsCheckError):
el = Element(None, self._teststr, self._teststr, clr, rnk)
continue
el = Element(None, self._teststr, self._teststr, clr, rnk)
obj = el.getJSON()
self.assertEqual(obj["cluster"], None if clr is None else clr.id())
self.assertEqual(obj["rank"], None if rnk is None else rnk.getJSON())
self.assertEqual(obj["features"], [])
class Pulse_TestSequence(unittest.TestCase):
def setUp(self):
self._iterations = 20
self._teststr = "test string"
def test_pulse_init(self):
for it in range(self._iterations):
# generate attributes
name = random.choice(general_input)
desc = random.choice(general_input)
sample = random.choice(general_input)
seed = str(name).strip() + type(sample).__name__
# create pulse instance
pulse = Pulse(name, desc, sample)
self.assertEqual(pulse.id(), misc.generateId(seed))
self.assertEqual(pulse.name(), str(name).strip())
self.assertEqual(pulse.desc(), str(desc).strip())
self.assertEqual(pulse._type, type(sample))
self.assertEqual(len(pulse._store), len(set()))
self.assertEqual(pulse._default, None)
def test_pulse_type(self):
for sample in general_input:
pulse = Pulse(self._teststr, self._teststr, sample)
self.assertEqual(pulse.type(), type(sample))
def test_pulse_store(self):
pulse = Pulse(self._teststr, self._teststr, self._teststr)
self.assertEqual(len(pulse.store()), len(set()))
def test_pulse_default(self):
pulse = Pulse(self._teststr, self._teststr, self._teststr)
self.assertEqual(pulse.default(), None)
def test_pulse_addValueToStore(self):
samples = [1, "1", 1.0]
for sample in samples:
testset = set()
pulse = Pulse(self._teststr, self._teststr, sample)
for it in range(self._iterations):
value = random.choice(general_input)
pulse.addValueToStore(value)
if type(value) == type(sample):
testset.add(value)
self.assertEqual(len(pulse.store()), len(testset))
def test_pulse_getJSON(self):
for sample in general_input:
pulse = Pulse(self._teststr, self._teststr, sample)
obj = pulse.getJSON()
self.assertEqual(obj["type"], pulse.type().__name__)
self.assertEqual(obj["default"], pulse.default())
def test_pulse_static(self):
for sample in general_input:
pulse = Pulse(self._teststr, self._teststr, sample)
# pulse static property is always static
self.assertEqual(pulse.static(), True)
class StaticPulse_TestSequence(unittest.TestCase):
def setUp(self):
self._iterations = 20
self._sample = 1
self._teststr = "test string"
def test_staticpulse_setDefaultValue(self):
test_sample = 1
pulse = StaticPulse(self._teststr, self._teststr, self._sample)
for value in general_input:
pulse.addValueToStore(value)
test_default = None
# go through general input
for default in general_input:
if default is None or type(default) is type(self._sample):
test_default = default
pulse.setDefaultValue(default)
self.assertEqual(pulse.default(), test_default)
def test_staticpulse_static(self):
pulse = StaticPulse(self._teststr, self._teststr, self._sample)
self.assertEqual(pulse.static(), True)
class DynamicPulse_TestSequence(unittest.TestCase):
def setUp(self):
self._iterations = 20
self._sample = 117.0
self._prior = Dynamic.ForwardPriority
self._teststr = "test string"
def test_dynamicpulse_setDefaultValue_static(self):
        # test statically behaving dynamic pulse
pulse = DynamicPulse(self._teststr, self._teststr,
self._sample, self._prior, True)
for value in general_input:
pulse.addValueToStore(value)
        # add sample parameter (copy first so the shared module-level
        # general_input list is not mutated across tests)
        updated_list = list(general_input)
        updated_list.append(self._sample)
test_default = None
# go through values of updated list
for default in updated_list:
pulse.setDefaultValue(default)
if default is None:
test_default = default
elif type(default)==type(self._sample) and default in pulse.store():
test_default = default
self.assertEqual(pulse.default(), test_default)
def test_dynamicpulse_setDefaultValue_dynamic(self):
# test fully dynamic pulse
pulse = DynamicPulse(self._teststr, self._teststr,
self._sample, self._prior, False)
for value in general_input:
pulse.addValueToStore(value)
        # add sample parameter (copy first so the shared module-level
        # general_input list is not mutated across tests)
        updated_list = list(general_input)
        updated_list.append(self._sample)
        test_default = None
        for default in updated_list:
# must assign value without checking in the store
if default is None:
if pulse._store is not None:
s = sum(pulse._store); n = len(pulse._store)
if pulse._type is IntType:
test_default = int(s*1.0/n)
elif pulse._type is FloatType:
test_default = round(s*1.0/n, 2)
else:
test_default = None
else:
test_default = None
elif type(default) == type(self._sample):
test_default = default
pulse.setDefaultValue(default)
self.assertEqual(pulse.default(), test_default)
def test_dynamicpulse_static(self):
pulse = DynamicPulse(self._teststr, self._teststr,
self._sample, self._prior, False)
self.assertEqual(pulse.static(), False)
pulse = DynamicPulse(self._teststr, self._teststr,
self._sample, self._prior, True)
self.assertEqual(pulse.static(), True)
def test_dynamicpulse_setStatic(self):
pulse = DynamicPulse(self._teststr, self._teststr,
self._sample, self._prior, False)
self.assertEqual(pulse.static(), False)
pulse.setStatic(True)
self.assertEqual(pulse.static(), True)
pulse.setStatic([])
self.assertEqual(pulse.static(), False)
pulse.setStatic(1)
self.assertEqual(pulse.static(), True)
def test_dynamicpulse_changeDefault(self):
# sample of data and maximum elements in range
sample = 1; max_el = 5
pulse = DynamicPulse(self._teststr, self._teststr,
sample, self._prior, False)
self.assertEqual(pulse.default(), None)
for i in range(max_el):
pulse.addValueToStore(i)
default = int(sum(range(max_el))*1.0 / len(range(max_el)))
self.assertEqual(pulse.default(), default)
        # set default that is out of range
pulse.setDefaultValue(max_el*2)
self.assertEqual(pulse.default(), max_el*2)
# change static attribute to True
pulse.setStatic(True)
self.assertEqual(pulse.default(), None)
pulse.setStatic(False)
        # after resetting to dynamic, the default value should not be None
self.assertEqual(pulse.default(), default)
def test_dynamicpulse_floatDefault(self):
elems = [320.0, 300.0, 199.0, 234.0, 250.0, 245.0, 230.0]
avg = sum(elems)*1.0 / len(elems)
# round average
avg = int(avg*100)*1.0 / 100
pulse = DynamicPulse(
self._teststr,
self._teststr,
elems[0],
self._prior
)
for elem in elems:
pulse.addValueToStore(elem)
self.assertEqual(pulse.default(), avg)
# Load test suites
def _suites():
return [
DataItem_TestSequence,
Cluster_TestSequence,
Element_TestSequence,
Pulse_TestSequence,
StaticPulse_TestSequence,
DynamicPulse_TestSequence
]
# Load tests
def loadSuites():
# global test suite for this module
gsuite = unittest.TestSuite()
for suite in _suites():
gsuite.addTest(unittest.TestLoader().loadTestsFromTestCase(suite))
return gsuite
if __name__ == '__main__':
suite = loadSuites()
print ""
print "### Running tests ###"
print "-" * 70
unittest.TextTestRunner(verbosity=2).run(suite)
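# Illustrative invocation (a sketch; Python 2 is assumed, given the print
# statements and sys.maxint usage above):
#
#     python analytics/core/tests/unittest_core.py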
| {
"content_hash": "b7a36c43d7c5b3544b7115731bbc6991",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 81,
"avg_line_length": 39.68402777777778,
"alnum_prop": 0.565228803919853,
"repo_name": "sadikovi/pulsar",
"id": "5c3545646f909f41125624dfc1114d071cdc009a",
"size": "22895",
"binary": false,
"copies": "1",
"ref": "refs/heads/analytics",
"path": "analytics/core/tests/unittest_core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6019"
},
{
"name": "HTML",
"bytes": "20112"
},
{
"name": "JavaScript",
"bytes": "119345"
},
{
"name": "Python",
"bytes": "281497"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
} |
"""
sphinx.builders.latex.transforms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Transforms for LaTeX builder.
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from typing import cast
from docutils import nodes
from sphinx import addnodes
from sphinx.builders.latex.nodes import (
captioned_literal_block, footnotemark, footnotetext, math_reference, thebibliography
)
from sphinx.transforms import SphinxTransform
from sphinx.util.nodes import NodeMatcher
if False:
# For type annotation
from typing import Any, List, Set, Tuple # NOQA
URI_SCHEMES = ('mailto:', 'http:', 'https:', 'ftp:')
class FootnoteDocnameUpdater(SphinxTransform):
"""Add docname to footnote and footnote_reference nodes."""
default_priority = 700
TARGET_NODES = (nodes.footnote, nodes.footnote_reference)
def apply(self, **kwargs):
# type: (Any) -> None
matcher = NodeMatcher(*self.TARGET_NODES)
for node in self.document.traverse(matcher): # type: nodes.Element
node['docname'] = self.env.docname
class ShowUrlsTransform(SphinxTransform):
"""Expand references to inline text or footnotes.
For more information, see :confval:`latex_show_urls`.
    .. note:: This transform is used for the integrated doctree.
"""
default_priority = 400
# references are expanded to footnotes (or not)
expanded = False
def apply(self, **kwargs):
# type: (Any) -> None
try:
# replace id_prefix temporarily
settings = self.document.settings # type: Any
id_prefix = settings.id_prefix
settings.id_prefix = 'show_urls'
self.expand_show_urls()
if self.expanded:
self.renumber_footnotes()
finally:
# restore id_prefix
settings.id_prefix = id_prefix
def expand_show_urls(self):
# type: () -> None
show_urls = self.config.latex_show_urls
if show_urls is False or show_urls == 'no':
return
for node in self.document.traverse(nodes.reference):
uri = node.get('refuri', '')
if uri.startswith(URI_SCHEMES):
if uri.startswith('mailto:'):
uri = uri[7:]
if node.astext() != uri:
index = node.parent.index(node)
docname = self.get_docname_for_node(node)
if show_urls == 'footnote':
fn, fnref = self.create_footnote(uri, docname)
node.parent.insert(index + 1, fn)
node.parent.insert(index + 2, fnref)
self.expanded = True
else: # all other true values (b/w compat)
textnode = nodes.Text(" (%s)" % uri)
node.parent.insert(index + 1, textnode)
def get_docname_for_node(self, node):
# type: (nodes.Node) -> str
while node:
if isinstance(node, nodes.document):
return self.env.path2doc(node['source'])
elif isinstance(node, addnodes.start_of_file):
return node['docname']
else:
node = node.parent
return None # never reached here. only for type hinting
def create_footnote(self, uri, docname):
# type: (str, str) -> Tuple[nodes.footnote, nodes.footnote_reference]
reference = nodes.reference('', nodes.Text(uri), refuri=uri, nolinkurl=True)
footnote = nodes.footnote(uri, auto=1, docname=docname)
footnote['names'].append('#')
footnote += nodes.label('', '#')
footnote += nodes.paragraph('', '', reference)
self.document.note_autofootnote(footnote)
footnote_ref = nodes.footnote_reference('[#]_', auto=1,
refid=footnote['ids'][0], docname=docname)
footnote_ref += nodes.Text('#')
self.document.note_autofootnote_ref(footnote_ref)
footnote.add_backref(footnote_ref['ids'][0])
return footnote, footnote_ref
def renumber_footnotes(self):
# type: () -> None
collector = FootnoteCollector(self.document)
self.document.walkabout(collector)
num = 0
for footnote in collector.auto_footnotes:
# search unused footnote number
while True:
num += 1
if str(num) not in collector.used_footnote_numbers:
break
# assign new footnote number
old_label = cast(nodes.label, footnote[0])
old_label.replace_self(nodes.label('', str(num)))
            if old_label.astext() in footnote['names']:
footnote['names'].remove(old_label.astext())
footnote['names'].append(str(num))
# update footnote_references by new footnote number
docname = footnote['docname']
for ref in collector.footnote_refs:
if docname == ref['docname'] and footnote['ids'][0] == ref['refid']:
ref.remove(ref[0])
ref += nodes.Text(str(num))
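# Illustrative configuration (a sketch, not part of this module): in a
# project's conf.py, ``latex_show_urls`` selects how ShowUrlsTransform
# expands references; 'footnote' takes the create_footnote() path above,
# while other true values fall back to inline " (URL)" text:
#
#     latex_show_urls = 'footnote'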
class FootnoteCollector(nodes.NodeVisitor):
"""Collect footnotes and footnote references on the document"""
def __init__(self, document):
# type: (nodes.document) -> None
self.auto_footnotes = [] # type: List[nodes.footnote]
self.used_footnote_numbers = set() # type: Set[str]
self.footnote_refs = [] # type: List[nodes.footnote_reference]
super().__init__(document)
def unknown_visit(self, node):
# type: (nodes.Node) -> None
pass
def unknown_departure(self, node):
# type: (nodes.Node) -> None
pass
def visit_footnote(self, node):
# type: (nodes.footnote) -> None
if node.get('auto'):
self.auto_footnotes.append(node)
else:
for name in node['names']:
self.used_footnote_numbers.add(name)
def visit_footnote_reference(self, node):
# type: (nodes.footnote_reference) -> None
self.footnote_refs.append(node)
class LaTeXFootnoteTransform(SphinxTransform):
"""Convert footnote definitions and references to appropriate form to LaTeX.
* Replace footnotes on restricted zone (e.g. headings) by footnotemark node.
In addition, append a footnotetext node after the zone.
Before::
<section>
<title>
headings having footnotes
<footnote_reference>
1
<footnote ids="1">
<label>
1
<paragraph>
footnote body
After::
<section>
<title>
headings having footnotes
<footnotemark>
1
<footnotetext>
footnote body
<footnotetext>
<label>
1
<paragraph>
footnote body
* Integrate footnote definitions and footnote references to single footnote node
Before::
blah blah blah
<footnote_reference refid="id1">
1
blah blah blah ...
<footnote ids="1">
<label>
1
<paragraph>
footnote body
After::
blah blah blah
<footnote ids="1">
<label>
1
<paragraph>
footnote body
blah blah blah ...
    * Replace second and subsequent footnote references which refer to the same
      footnote definition by footnotemark nodes.
Before::
blah blah blah
<footnote_reference refid="id1">
1
blah blah blah
<footnote_reference refid="id1">
1
blah blah blah ...
<footnote ids="1">
<label>
1
<paragraph>
footnote body
After::
blah blah blah
<footnote ids="1">
<label>
1
<paragraph>
footnote body
blah blah blah
<footnotemark>
1
blah blah blah ...
* Remove unreferenced footnotes
Before::
<footnote ids="1">
<label>
1
<paragraph>
Unreferenced footnote!
After::
<!-- nothing! -->
* Move footnotes in a title of table or thead to head of tbody
Before::
<table>
<title>
title having footnote_reference
<footnote_reference refid="1">
1
<tgroup>
<thead>
<row>
<entry>
header having footnote_reference
<footnote_reference refid="2">
2
<tbody>
<row>
...
<footnote ids="1">
<label>
1
<paragraph>
footnote body
<footnote ids="2">
<label>
2
<paragraph>
footnote body
After::
<table>
<title>
title having footnote_reference
<footnotemark>
1
<tgroup>
<thead>
<row>
<entry>
header having footnote_reference
<footnotemark>
2
<tbody>
<footnotetext>
<label>
1
<paragraph>
footnote body
<footnotetext>
<label>
2
<paragraph>
footnote body
<row>
...
"""
default_priority = 600
def apply(self, **kwargs):
# type: (Any) -> None
footnotes = list(self.document.traverse(nodes.footnote))
for node in footnotes:
node.parent.remove(node)
visitor = LaTeXFootnoteVisitor(self.document, footnotes)
self.document.walkabout(visitor)
class LaTeXFootnoteVisitor(nodes.NodeVisitor):
def __init__(self, document, footnotes):
# type: (nodes.document, List[nodes.footnote]) -> None
self.appeared = set() # type: Set[Tuple[str, str]]
self.footnotes = footnotes # type: List[nodes.footnote]
self.pendings = [] # type: List[nodes.footnote]
self.table_footnotes = [] # type: List[nodes.footnote]
self.restricted = None # type: nodes.Element
super().__init__(document)
def unknown_visit(self, node):
# type: (nodes.Node) -> None
pass
def unknown_departure(self, node):
# type: (nodes.Node) -> None
pass
def restrict(self, node):
# type: (nodes.Element) -> None
if self.restricted is None:
self.restricted = node
def unrestrict(self, node):
# type: (nodes.Element) -> None
if self.restricted == node:
self.restricted = None
pos = node.parent.index(node)
for i, footnote, in enumerate(self.pendings):
fntext = footnotetext('', *footnote.children)
node.parent.insert(pos + i + 1, fntext)
self.pendings = []
def visit_figure(self, node):
# type: (nodes.figure) -> None
self.restrict(node)
def depart_figure(self, node):
# type: (nodes.figure) -> None
self.unrestrict(node)
def visit_term(self, node):
# type: (nodes.term) -> None
self.restrict(node)
def depart_term(self, node):
# type: (nodes.term) -> None
self.unrestrict(node)
def visit_caption(self, node):
# type: (nodes.caption) -> None
self.restrict(node)
def depart_caption(self, node):
# type: (nodes.caption) -> None
self.unrestrict(node)
def visit_title(self, node):
# type: (nodes.title) -> None
if isinstance(node.parent, (nodes.section, nodes.table)):
self.restrict(node)
def depart_title(self, node):
# type: (nodes.title) -> None
if isinstance(node.parent, nodes.section):
self.unrestrict(node)
elif isinstance(node.parent, nodes.table):
self.table_footnotes += self.pendings
self.pendings = []
self.unrestrict(node)
def visit_thead(self, node):
# type: (nodes.thead) -> None
self.restrict(node)
def depart_thead(self, node):
# type: (nodes.thead) -> None
self.table_footnotes += self.pendings
self.pendings = []
self.unrestrict(node)
def depart_table(self, node):
# type: (nodes.table) -> None
tbody = list(node.traverse(nodes.tbody))[0]
for footnote in reversed(self.table_footnotes):
fntext = footnotetext('', *footnote.children)
tbody.insert(0, fntext)
self.table_footnotes = []
def visit_footnote(self, node):
# type: (nodes.footnote) -> None
self.restrict(node)
def depart_footnote(self, node):
# type: (nodes.footnote) -> None
self.unrestrict(node)
def visit_footnote_reference(self, node):
# type: (nodes.footnote_reference) -> None
number = node.astext().strip()
docname = node['docname']
if self.restricted:
mark = footnotemark('', number)
node.replace_self(mark)
if (docname, number) not in self.appeared:
footnote = self.get_footnote_by_reference(node)
self.pendings.append(footnote)
elif (docname, number) in self.appeared:
mark = footnotemark('', number)
node.replace_self(mark)
else:
footnote = self.get_footnote_by_reference(node)
self.footnotes.remove(footnote)
node.replace_self(footnote)
footnote.walkabout(self)
self.appeared.add((docname, number))
raise nodes.SkipNode
def get_footnote_by_reference(self, node):
# type: (nodes.footnote_reference) -> nodes.footnote
docname = node['docname']
for footnote in self.footnotes:
if docname == footnote['docname'] and footnote['ids'][0] == node['refid']:
return footnote
return None
class BibliographyTransform(SphinxTransform):
"""Gather bibliography entries to tail of document.
Before::
<document>
<paragraph>
blah blah blah
<citation>
...
<paragraph>
blah blah blah
<citation>
...
...
After::
<document>
<paragraph>
blah blah blah
<paragraph>
blah blah blah
...
<thebibliography>
<citation>
...
<citation>
...
"""
default_priority = 750
def apply(self, **kwargs):
# type: (Any) -> None
citations = thebibliography()
for node in self.document.traverse(nodes.citation):
node.parent.remove(node)
citations += node
if len(citations) > 0:
self.document += citations
class CitationReferenceTransform(SphinxTransform):
"""Replace pending_xref nodes for citation by citation_reference.
    To handle citation references easily in the LaTeX writer, this converts
    pending_xref nodes to citation_reference nodes.
"""
default_priority = 5 # before ReferencesResolver
def apply(self, **kwargs):
# type: (Any) -> None
if self.app.builder.name != 'latex':
return
matcher = NodeMatcher(addnodes.pending_xref, refdomain='std', reftype='citation')
citations = self.env.get_domain('std').data['citations']
for node in self.document.traverse(matcher): # type: addnodes.pending_xref
docname, labelid, _ = citations.get(node['reftarget'], ('', '', 0))
if docname:
citation_ref = nodes.citation_reference('', '', *node.children,
docname=docname, refname=labelid)
node.replace_self(citation_ref)
class MathReferenceTransform(SphinxTransform):
"""Replace pending_xref nodes for math by math_reference.
    To handle math references easily in the LaTeX writer, this converts
    pending_xref nodes to math_reference nodes.
"""
default_priority = 5 # before ReferencesResolver
def apply(self, **kwargs):
# type: (Any) -> None
if self.app.builder.name != 'latex':
return
equations = self.env.get_domain('math').data['objects']
for node in self.document.traverse(addnodes.pending_xref):
if node['refdomain'] == 'math' and node['reftype'] in ('eq', 'numref'):
docname, _ = equations.get(node['reftarget'], (None, None))
if docname:
refnode = math_reference('', docname=docname, target=node['reftarget'])
node.replace_self(refnode)
class LiteralBlockTransform(SphinxTransform):
"""Replace container nodes for literal_block by captioned_literal_block."""
default_priority = 400
def apply(self, **kwargs):
# type: (Any) -> None
if self.app.builder.name != 'latex':
return
matcher = NodeMatcher(nodes.container, literal_block=True)
for node in self.document.traverse(matcher): # type: nodes.container
newnode = captioned_literal_block('', *node.children, **node.attributes)
node.replace_self(newnode)
class DocumentTargetTransform(SphinxTransform):
"""Add :doc label to the first section of each document."""
default_priority = 400
def apply(self, **kwargs):
# type: (Any) -> None
if self.app.builder.name != 'latex':
return
for node in self.document.traverse(addnodes.start_of_file):
section = node.next_node(nodes.section)
if section:
section['ids'].append(':doc') # special label for :doc:
class IndexInSectionTitleTransform(SphinxTransform):
"""Move index nodes in section title to outside of the title.
LaTeX index macro is not compatible with some handling of section titles
such as uppercasing done on LaTeX side (cf. fncychap handling of ``\\chapter``).
Moving the index node to after the title node fixes that.
Before::
<section>
<title>
blah blah <index entries=[...]/>blah
<paragraph>
blah blah blah
...
After::
<section>
<title>
blah blah blah
<index entries=[...]/>
<paragraph>
blah blah blah
...
"""
default_priority = 400
    def apply(self, **kwargs):
        # type: (Any) -> None
for node in self.document.traverse(nodes.title):
if isinstance(node.parent, nodes.section):
for i, index in enumerate(node.traverse(addnodes.index)):
# move the index node next to the section title
node.remove(index)
node.parent.insert(i + 1, index)
| {
"content_hash": "8cbc80ccfe88fd4c4455f1d0df5c3897",
"timestamp": "",
"source": "github",
"line_count": 641,
"max_line_length": 93,
"avg_line_length": 31.282371294851796,
"alnum_prop": 0.5269798523838021,
"repo_name": "lmregus/Portfolio",
"id": "52d5bc9ea544d6d2780c529c698d7b4b7ef1904c",
"size": "20052",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/design_patterns/env/lib/python3.7/site-packages/sphinx/builders/latex/transforms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "27682"
},
{
"name": "C++",
"bytes": "25458"
},
{
"name": "CSS",
"bytes": "12842"
},
{
"name": "HTML",
"bytes": "49171"
},
{
"name": "Java",
"bytes": "99711"
},
{
"name": "JavaScript",
"bytes": "827"
},
{
"name": "Python",
"bytes": "42857"
},
{
"name": "Shell",
"bytes": "5710"
}
],
"symlink_target": ""
} |
"""Sonnet module information, stored in the graph collections."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import six
from sonnet.protos import module_pb2
from sonnet.python.modules import base_errors
import tensorflow as tf
from tensorflow.python.framework import ops
logging = tf.logging
SONNET_COLLECTION_NAME = "sonnet"
ModuleInfo = collections.namedtuple(
"ModuleInfo",
("module_name", "scope_name", "class_name", "connected_subgraphs"))
ConnectedSubGraph = collections.namedtuple(
"ConnectedSubGraph", ("module", "name_scope", "inputs", "outputs"))
_SPARSE_TENSOR_NAME = "SparseTensor"
_SPARSE_TENSOR_FIELD = ("indices", "values", "dense_shape")
class _UnserializableObject(object):
"""Placeholder for object which cannot be serialized."""
# Placeholder for tensor which cannot be found.
_MissingTensor = collections.namedtuple("_MissingTensor", ("name",))
def _is_namedtuple(obj):
"""Returns `True` if `obj` is a `collections.namedtuple`."""
return isinstance(obj, tuple) and hasattr(obj, "_fields")
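# For example (illustrative, not part of the original module):
# _is_namedtuple(collections.namedtuple("Pair", ("x", "y"))(1, 2)) is True,
# while _is_namedtuple((1, 2)) is False, since plain tuples lack `_fields`.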
def _is_iterable(obj):
"""Returns `True` if the object is a supported iterable."""
return isinstance(obj, (list, tuple, dict))
def _graph_element_to_path(graph_element):
"""Returns the path of the given graph element.
Args:
graph_element: A graph element. Currently only `tf.Tensor` is supported.
Returns:
The graph path corresponding to `graph_element` or the empty string if no
path could be found.
"""
if isinstance(graph_element, tf.Tensor):
return graph_element.name
# Returns an empty string when no name is defined. This will be deserialized
  # as a `_UnserializableObject`.
return ""
def _path_to_graph_element(path, graph):
"""Returns the graph element of the given path.
Args:
path: The path of the graph element.
graph: The graph to look into.
Returns:
The graph element or an instance of `_MissingTensor`.
"""
try:
return graph.get_tensor_by_name(path)
except KeyError:
return _MissingTensor(path)
def _to_proto_sparse_tensor(sparse_tensor, nested_proto,
process_leafs, already_processed):
"""Serializes a `tf.SparseTensor` into `nested_proto`.
Args:
sparse_tensor: An instance of `tf.SparseTensor`.
nested_proto: A `module_pb2.NestedData` instance to be filled from
`sparse_tensor`.
    process_leafs: A function to be applied to the leaf values of the nested
structure.
already_processed: Set of already processed objects (used to avoid
infinite recursion).
"""
already_processed.add(id(sparse_tensor))
nested_proto.named_tuple.name = _SPARSE_TENSOR_NAME
for str_key in _SPARSE_TENSOR_FIELD:
tensor = getattr(sparse_tensor, str_key)
nested_proto.named_tuple.map[str_key].value = process_leafs(tensor)
def _from_proto_sparse_tensor(sparse_tensor_proto, process_leafs):
"""Deserializes a `tf.SparseTensor` from `sparse_tensor_proto`.
Args:
sparse_tensor_proto: A proto representing a `tf.SparseTensor`.
    process_leafs: A function to be applied to the leaf values of the nested
structure.
Returns:
An instance of `tf.SparseTensor`.
"""
if not sparse_tensor_proto.HasField("named_tuple"):
raise base_errors.ModuleInfoError(
"Error while deserializing a SparseTensor: expected proto tuple.")
if sparse_tensor_proto.named_tuple.name != _SPARSE_TENSOR_NAME:
raise base_errors.ModuleInfoError(
"Error while deserializing a SparseTensor: The name of the tuple "
"should have been {} but was {}.".format(
_SPARSE_TENSOR_NAME, sparse_tensor_proto.named_tuple.name))
named_tuple_map = sparse_tensor_proto.named_tuple.map
return tf.SparseTensor(
indices=process_leafs(named_tuple_map["indices"].value),
values=process_leafs(named_tuple_map["values"].value),
dense_shape=process_leafs(named_tuple_map["dense_shape"].value))
# This named tuple contains the necessary information to handle a Python
# object which should be handled in a specific way. The "check" field should
# contain a callable returning `True` if the Python object is indeed special
# and the "to_proto" field should contain a custom serializer.
_SpecialTypeInfo = collections.namedtuple("_SpecialTypeInfo",
("check", "to_proto", "from_proto"))
_TO_PROTO_SPECIAL_TYPES = collections.OrderedDict()
_TO_PROTO_SPECIAL_TYPES[_SPARSE_TENSOR_NAME] = _SpecialTypeInfo(
check=lambda obj: isinstance(obj, tf.SparseTensor),
to_proto=_to_proto_sparse_tensor,
from_proto=_from_proto_sparse_tensor)
def _nested_to_proto(nested_value, nested_proto, process_leafs,
already_processed):
"""Serializes `nested_value` into `nested_proto`.
Args:
nested_value: A nested Python value.
nested_proto: A `module_pb2.NestedData` instance to be filled from the value
in `nested_value`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
already_processed: Set of already processed objects (used to avoid
infinite recursion).
Raises:
ModuleInfoError: If `nested_proto` is not an instance of
`module_pb2.NestedData`.
"""
if not isinstance(nested_proto, module_pb2.NestedData):
raise base_errors.ModuleInfoError("Expected module_pb2.NestedData.")
# If this object was already processed, mark as "unserializable"
# to avoid infinite recursion.
if id(nested_value) in already_processed:
nested_proto.value = ""
return
# Check special types.
for type_name, type_info in six.iteritems(_TO_PROTO_SPECIAL_TYPES):
if type_info.check(nested_value):
nested_proto.special_type.name = type_name
type_info.to_proto(
nested_value, nested_proto.special_type.object,
process_leafs, already_processed)
return
# Check standard types.
if _is_iterable(nested_value):
# Mark this container as "already processed" to avoid infinite recursion.
already_processed.add(id(nested_value))
if isinstance(nested_value, dict):
nested_proto.dict.SetInParent()
for key, child in six.iteritems(nested_value):
str_key = str(key)
child_proto = nested_proto.dict.map[str_key]
_nested_to_proto(child, child_proto, process_leafs, already_processed)
elif isinstance(nested_value, tuple):
# NamedTuple?
if _is_namedtuple(nested_value):
nested_proto.named_tuple.name = type(nested_value).__name__
for str_key in nested_value._fields:
child = getattr(nested_value, str_key)
child_proto = nested_proto.named_tuple.map[str_key]
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.tuple.SetInParent()
for child in nested_value:
child_proto = nested_proto.tuple.list.add()
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.list.SetInParent()
for child in nested_value:
child_proto = nested_proto.list.list.add()
_nested_to_proto(child, child_proto, process_leafs, already_processed)
else:
nested_proto.value = process_leafs(nested_value)
def _module_info_to_proto(module_info, export_scope=None):
"""Serializes `module_into`.
Args:
module_info: An instance of `ModuleInfo`.
export_scope: Optional `string`. Name scope to remove.
Returns:
An instance of `module_pb2.SonnetModule`.
"""
def strip_name_scope(name_scope):
return ops.strip_name_scope(name_scope, export_scope)
def process_leafs(value):
return strip_name_scope(_graph_element_to_path(value))
module_info_def = module_pb2.SonnetModule(
module_name=module_info.module_name,
scope_name=strip_name_scope(module_info.scope_name),
class_name=module_info.class_name)
for connected_subgraph in module_info.connected_subgraphs:
connected_subgraph_info_def = module_info_def.connected_subgraphs.add()
connected_subgraph_info_def.name_scope = strip_name_scope(
connected_subgraph.name_scope)
_nested_to_proto(
connected_subgraph.inputs,
connected_subgraph_info_def.inputs,
process_leafs, set())
_nested_to_proto(
connected_subgraph.outputs,
connected_subgraph_info_def.outputs,
process_leafs, set())
return module_info_def
def _nested_from_proto(nested_proto, process_leafs):
"""Deserializes `nested_proto`.
Args:
nested_proto: An instance of `module_pb2.NestedData`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
Returns:
    An instance of `string`, `list`, `tuple`, `dict` or `namedtuple`.
Raises:
    base_errors.ModuleInfoError: If the protobuf is of the wrong type or
if some of its fields are missing.
"""
if not isinstance(nested_proto, module_pb2.NestedData):
raise base_errors.ModuleInfoError("Expected module_pb2.NestedData.")
if nested_proto.HasField("value"):
value = nested_proto.value
if not value:
value = _UnserializableObject()
else:
value = process_leafs(value)
return value
elif nested_proto.HasField("list"):
return [_nested_from_proto(child, process_leafs)
for child in nested_proto.list.list]
elif nested_proto.HasField("tuple"):
return tuple(_nested_from_proto(child, process_leafs)
for child in nested_proto.tuple.list)
elif nested_proto.HasField("dict"):
return {name: _nested_from_proto(child, process_leafs)
for name, child in six.iteritems(nested_proto.dict.map)}
elif nested_proto.HasField("named_tuple"):
tmp_dict = {name: _nested_from_proto(child, process_leafs)
for name, child in six.iteritems(nested_proto.named_tuple.map)}
# Note that this needs to be a named tuple to work with existing usage.
NamedTuple = collections.namedtuple( # pylint: disable=invalid-name
nested_proto.named_tuple.name, tmp_dict.keys())
return NamedTuple(**tmp_dict)
elif nested_proto.HasField("special_type"):
if nested_proto.special_type.name not in _TO_PROTO_SPECIAL_TYPES:
return _UnserializableObject()
type_info = _TO_PROTO_SPECIAL_TYPES[nested_proto.special_type.name]
return type_info.from_proto(nested_proto.special_type.object, process_leafs)
else:
raise base_errors.ModuleInfoError(
"Cannot deserialize a `ModuleInfo` protobuf with no fields.")
def _module_info_from_proto(module_info_def, import_scope=None):
"""Deserializes `module_info_def` proto.
Args:
module_info_def: An instance of `module_pb2.SonnetModule`.
import_scope: Optional `string`. Name scope to use.
Returns:
An instance of `ModuleInfo`.
Raises:
    base_errors.ModuleInfoError: If the protobuf is of the wrong type or
if some of its fields are missing.
"""
graph = tf.get_default_graph()
def prepend_name_scope(name_scope):
return ops.prepend_name_scope(name_scope, import_scope)
def process_leafs(name):
return _path_to_graph_element(prepend_name_scope(name), graph)
connected_subgraphs = []
module_info = ModuleInfo(
module_name=module_info_def.module_name,
scope_name=prepend_name_scope(module_info_def.scope_name),
class_name=module_info_def.class_name,
connected_subgraphs=connected_subgraphs)
for connected_subgraph_def in module_info_def.connected_subgraphs:
connected_subgraph = ConnectedSubGraph(
module=module_info,
name_scope=prepend_name_scope(connected_subgraph_def.name_scope),
inputs=_nested_from_proto(
connected_subgraph_def.inputs, process_leafs),
outputs=_nested_from_proto(
connected_subgraph_def.outputs, process_leafs))
connected_subgraphs.append(connected_subgraph)
return module_info
def _module_info_from_proto_safe(module_info_def, import_scope=None):
"""Deserializes the `module_info_def` proto without raising exceptions.
Args:
module_info_def: An instance of `module_pb2.SonnetModule`.
import_scope: Optional `string`. Name scope to use.
Returns:
An instance of `ModuleInfo`.
"""
try:
return _module_info_from_proto(module_info_def, import_scope)
except Exception as e: # pylint: disable=broad-except
logging.warning(
"Error encountered when deserializing sonnet ModuleInfo:\n%s", str(e))
return None
# `to_proto` is already wrapped into a try...except externally but
# `from_proto` isn't. In order to minimize disruption, catch all the exceptions
# happening during `from_proto` and just log them.
ops.register_proto_function(SONNET_COLLECTION_NAME,
module_pb2.SonnetModule,
to_proto=_module_info_to_proto,
from_proto=_module_info_from_proto_safe)
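# A minimal round-trip sketch (not part of the original module; assumes a
# default graph in which Sonnet has already recorded ModuleInfo entries
# under SONNET_COLLECTION_NAME):
def _example_round_trip():
  """Serialize the first recorded ModuleInfo and deserialize it again."""
  module_infos = tf.get_collection(SONNET_COLLECTION_NAME)
  proto = _module_info_to_proto(module_infos[0])
  return _module_info_from_proto_safe(proto)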
| {
"content_hash": "e578b27182b4622dd2213c3f099a9e76",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 80,
"avg_line_length": 36.34453781512605,
"alnum_prop": 0.6957996146435452,
"repo_name": "rakshit-agrawal/sonnet",
"id": "127ada8a91d3b2be33f01d1d77a6d447162a382e",
"size": "13658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sonnet/python/modules/base_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "76901"
},
{
"name": "Python",
"bytes": "1258835"
},
{
"name": "Shell",
"bytes": "1724"
}
],
"symlink_target": ""
} |
from locust import HttpLocust, TaskSet, task
class WebsiteTasks(TaskSet):
@task
def debug(self):
self.client.get("/debug")
class WebsiteUser(HttpLocust):
task_set = WebsiteTasks
# min_wait = 5000
# max_wait = 15000
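# Illustrative invocation (a sketch; the host value is an assumption):
#
#     locust -f tests/locustfile.py --host=http://localhost:8000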
| {
"content_hash": "ec4d84774b8a172ec4fbabbaeb002280",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 44,
"avg_line_length": 22.272727272727273,
"alnum_prop": 0.6653061224489796,
"repo_name": "2peppers/shot",
"id": "805f88c61dc34e0b112d2ea8f5f73d46675437a8",
"size": "245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/locustfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "20669"
},
{
"name": "Python",
"bytes": "46509"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.apimanagement import ApiManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-apimanagement
# USAGE
python api_management_get_deleted_service_by_name.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ApiManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.deleted_services.get_by_name(
service_name="apimService3",
location="westus",
)
print(response)
# x-ms-original-file: specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2021-08-01/examples/ApiManagementGetDeletedServiceByName.json
if __name__ == "__main__":
main()
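# Illustrative environment setup before running (a sketch; the values are
# placeholders for your AAD application's credentials, per the docstring
# above):
#
#     export AZURE_CLIENT_ID=<client-id>
#     export AZURE_TENANT_ID=<tenant-id>
#     export AZURE_CLIENT_SECRET=<client-secret>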
| {
"content_hash": "bb9f30bacccd71bb55069274c56ee387",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 159,
"avg_line_length": 33.515151515151516,
"alnum_prop": 0.7377938517179023,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e0456001a1d339153e195c57f810bc7b25201c0f",
"size": "1574",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/apimanagement/azure-mgmt-apimanagement/generated_samples/api_management_get_deleted_service_by_name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import sys
import logging
logging.basicConfig(level=logging.DEBUG)
def load_data(filename):
with open(filename) as f:
n = int(f.readline())
#logging.debug('n is {n}'.format(n=n))
get_line = lambda : f.readline().replace('\n','').replace('\r','')
dna = ''
gene_predict = []
for i in range(0,n/80):
dna += get_line()
if n%80 != 0:
dna += get_line()
g = int(f.readline())
for i in range(0,g):
gene = tuple(f.readline().split())
gene = int(gene[0]),int(gene[1]),int(gene[2])
gene_predict.append(gene)
gene_predict.sort()
return (n, dna, g, gene_predict)
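# Note: main() below scores, for each prediction, a greedy first-fit chain of
# non-overlapping predictions taken in sorted order and reports the best
# total. This greedy pass is a heuristic sketch; an exact answer to this
# weighted-interval-scheduling instance would use dynamic programming over
# predictions sorted by end position.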
def main():
if len(sys.argv) < 2: sys.exit('Ops! need a file name as argument')
n, dna, g, gene_predict = load_data(sys.argv[1])
    logging.debug(' \n n is: {n} \n dna is: {dna} \n g is: {g} \n gene predictions are: {gp}'.
format(n=n, dna=dna, g=g, gp=gene_predict))
scored = []
chains_score = []
logging.debug('possible chains down bellow:')
for i,gene in enumerate(gene_predict):
chain = []
chain.append(gene)
last_gene = gene
for new_gene in gene_predict[i+1:]:
if new_gene[0] > last_gene[1]:
chain.append(new_gene)
last_gene = new_gene
score = 0
for c in chain:
score += c[2]
chains_score.append(score)
logging.debug('{chain} -> {score}'.format(chain=chain, score=score))
chains_score.sort()
print 'biggest is', chains_score[-1]
if __name__=="__main__":
    main()
| {
"content_hash": "8bef379618cd09a99feb30a59c2ce2fa",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 93,
"avg_line_length": 24.620689655172413,
"alnum_prop": 0.6078431372549019,
"repo_name": "andersoncardoso/coding_challenges",
"id": "991d18c836225571669b96e3a062816fdaf26d89",
"size": "1460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codejam_and_others/gattaca/gattaca.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1865"
},
{
"name": "Python",
"bytes": "18599"
}
],
"symlink_target": ""
} |
import hashlib
import mimetools
import os
import shutil
import subprocess
import sys
import tempfile
import urllib2
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from rbtools import __version__, __version_info__, is_release
PY_VERSIONS = ["2.4", "2.5", "2.6", "2.7"]
LATEST_PY_VERSION = PY_VERSIONS[-1]
PACKAGE_NAME = 'RBTools'
RELEASES_URL = \
'reviewboard.org:/var/www/downloads.reviewboard.org/' \
'htdocs/releases/%s/%s.%s/' % (PACKAGE_NAME,
__version_info__[0],
__version_info__[1])
RBWEBSITE_API_URL = 'http://www.reviewboard.org/api/'
RELEASES_API_URL = '%sproducts/rbtools/releases/' % RBWEBSITE_API_URL
built_files = []
def load_config():
filename = os.path.join(os.path.expanduser('~'), '.rbwebsiterc')
if not os.path.exists(filename):
sys.stderr.write("A .rbwebsiterc file must exist in the form of:\n")
sys.stderr.write("\n")
sys.stderr.write("USERNAME = '<username>'\n")
sys.stderr.write("PASSWORD = '<password>'\n")
sys.exit(1)
user_config = {}
try:
execfile(filename, user_config)
except SyntaxError, e:
sys.stderr.write('Syntax error in config file: %s\n'
'Line %i offset %i\n' % (filename, e.lineno, e.offset))
sys.exit(1)
auth_handler = urllib2.HTTPBasicAuthHandler()
auth_handler.add_password(realm='Web API',
uri=RBWEBSITE_API_URL,
user=user_config['USERNAME'],
passwd=user_config['PASSWORD'])
opener = urllib2.build_opener(auth_handler)
urllib2.install_opener(opener)
def execute(cmdline):
if isinstance(cmdline, list):
print ">>> %s" % subprocess.list2cmdline(cmdline)
else:
print ">>> %s" % cmdline
p = subprocess.Popen(cmdline,
shell=True,
stdout=subprocess.PIPE)
s = ''
for data in p.stdout.readlines():
s += data
sys.stdout.write(data)
rc = p.wait()
if rc != 0:
print "!!! Error invoking command."
sys.exit(1)
return s
def run_setup(target, pyver = LATEST_PY_VERSION):
execute("python%s ./setup.py release %s" % (pyver, target))
def clone_git_tree(git_dir):
new_git_dir = tempfile.mkdtemp(prefix='rbtools-release.')
os.chdir(new_git_dir)
execute('git clone %s .' % git_dir)
return new_git_dir
def build_targets():
for pyver in PY_VERSIONS:
run_setup("bdist_egg", pyver)
built_files.append("dist/%s-%s-py%s.egg" %
(PACKAGE_NAME, __version__, pyver))
run_setup("sdist")
built_files.append("dist/%s-%s.tar.gz" %
(PACKAGE_NAME, __version__))
def build_checksums():
sha_filename = 'dist/%s-%s.sha256sum' % (PACKAGE_NAME, __version__)
out_f = open(sha_filename, 'w')
for filename in built_files:
m = hashlib.sha256()
in_f = open(filename, 'r')
m.update(in_f.read())
in_f.close()
out_f.write('%s %s\n' % (m.hexdigest(), os.path.basename(filename)))
out_f.close()
built_files.append(sha_filename)
def upload_files():
execute("scp %s %s" % (" ".join(built_files), RELEASES_URL))
def tag_release():
execute("git tag release-%s" % __version__)
def register_release():
    if __version_info__[3] == 'final':
run_setup("register")
    scm_revision = execute('git rev-parse release-%s' % __version__)
data = {
'major_version': __version_info__[0],
'minor_version': __version_info__[1],
'micro_version': __version_info__[2],
'release_type': __version_info__[3],
'release_num': __version_info__[4],
'scm_revision': scm_revision,
}
boundary = mimetools.choose_boundary()
content = ''
for key, value in data.iteritems():
content += '--%s\r\n' % boundary
content += 'Content-Disposition: form-data; name="%s"\r\n' % key
content += '\r\n'
content += str(value) + '\r\n'
content += '--%s--\r\n' % boundary
content += '\r\n'
headers = {
'Content-Type': 'multipart/form-data; boundary=%s' % boundary,
'Content-Length': str(len(content)),
}
print 'Posting release to reviewboard.org'
try:
f = urllib2.urlopen(urllib2.Request(url=RELEASES_API_URL, data=content,
headers=headers))
f.read()
except urllib2.HTTPError, e:
print "Error uploading. Got HTTP code %d:" % e.code
print e.read()
except urllib2.URLError, e:
try:
print "Error uploading. Got URL error:" % e.code
print e.read()
except AttributeError:
pass
def main():
if not os.path.exists("setup.py"):
sys.stderr.write("This must be run from the root of the "
"Djblets tree.\n")
sys.exit(1)
load_config()
if not is_release():
sys.stderr.write('This has not been marked as a release in '
'rbtools/__init__.py\n')
sys.exit(1)
cur_dir = os.getcwd()
git_dir = clone_git_tree(cur_dir)
build_targets()
build_checksums()
upload_files()
os.chdir(cur_dir)
shutil.rmtree(git_dir)
tag_release()
register_release()
if __name__ == "__main__":
main()
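# Illustrative invocation from the RBTools source tree (a sketch; assumes
# rbtools/__init__.py is marked as a release and ~/.rbwebsiterc holds
# USERNAME/PASSWORD, as checked above):
#
#     python ./contrib/internal/release.py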
| {
"content_hash": "380be7fa79f4ed86e184a0076fd94cc9",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 80,
"avg_line_length": 26.033175355450236,
"alnum_prop": 0.5545239395594392,
"repo_name": "blair/pkg-rbtools",
"id": "cad99d4db083e50d2ba66202ef65faa9335bb5fe",
"size": "5630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/internal/release.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "142215"
}
],
"symlink_target": ""
} |
"""Compile a subset of LaTeX to HTML."""
from __future__ import unicode_literals
import os
import io
import nikola.plugin_categories
import nikola.utils
import re
import json
from . import parser, htmlify
LOGGER = nikola.utils.get_logger('compile_latex', nikola.utils.STDERR_HANDLER)
class LaTeXContext(object):
"""Represent a context for LaTeX post compilation.
    Allows adding dependencies, storing data, and resolving links.
"""
id = None
def __init__(self, id, lang, thm_names, name=None):
"""Initialize context."""
self.id = id
self.name = name
self.lang = lang
self.thm_names = thm_names
self.__file_deps_fragment = set()
self.__file_deps_page = set()
self.__uptodate_deps_fragment = list()
self.__uptodate_deps_page = list()
self.__plugin_data = {}
self.__link_providers = []
def get_name(self):
"""Return name associated to context."""
return '(unknown:{0})'.format(self.id) if self.name is None else self.name
def add_file_dependency(self, filename, add='both'):
"""Add file dependency to post. Similar to Post.add_file_dependency."""
if add not in {'fragment', 'page', 'both'}:
raise Exception("Add parameter is '{0}', but must be either 'fragment', 'page', or 'both'.".format(add))
if add == 'fragment' or add == 'both':
self.__file_deps_fragment.add(filename)
if add == 'page' or add == 'both':
self.__file_deps_page.add(filename)
def add_uptodate_dependency(self, name, uptodate_dependency, add='both'):
"""Add doit uptodate dependency to post. Similar to Post.add_uptodate_dependency."""
if add not in {'fragment', 'page', 'both'}:
raise Exception("Add parameter is '{0}', but must be either 'fragment', 'page', or 'both'.".format(add))
if add == 'fragment' or add == 'both':
self.__uptodate_deps_fragment.append({'name': name, 'deps': uptodate_dependency})
if add == 'page' or add == 'both':
self.__uptodate_deps_page.append({'name': name, 'deps': uptodate_dependency})
def add_link_provider(self, link_provider):
"""Add a link provider to the context."""
self.__link_providers.append(link_provider)
def has_dependencies(self):
"""Check whether dependencies are available."""
return (len(self.__file_deps_fragment) > 0 or len(self.__file_deps_page) > 0 or
len(self.__uptodate_deps_fragment) > 0 or len(self.__uptodate_deps_page) > 0)
def get_file_dependencies_fragment(self):
"""Retrieve file dependencies for fragment generation."""
return sorted(list(self.__file_deps_fragment))
def get_file_dependencies_page(self):
"""Retrieve file dependencies for page generation."""
return sorted(list(self.__file_deps_page))
def get_uptodate_dependencies_fragment(self):
"""Retrieve doit uptodate dependencies for fragment generation."""
return self.__uptodate_deps_fragment
def get_uptodate_dependencies_page(self):
"""Retrieve doit uptodate dependencies for page generation."""
return self.__uptodate_deps_page
def store_plugin_data(self, plugin_name, key, data):
"""Store plugin-specific data in context."""
if plugin_name not in self.__plugin_data:
self.__plugin_data[plugin_name] = {}
self.__plugin_data[plugin_name][key] = data
def get_plugin_data(self, plugin_name, key, default_value=None):
"""Retrieve plugin-specific data from context."""
plugin_data = self.__plugin_data.get(plugin_name)
return default_value if plugin_data is None else plugin_data.get(key, default_value)
def inc_plugin_counter(self, plugin_name, key):
"""Provide simple plugin-specific counter for plugin to access."""
counter = self.get_plugin_data(plugin_name, key, 0) + 1
self.store_plugin_data(plugin_name, key, counter)
return counter
def __str__(self):
"""Return string representation."""
return 'LaTeXContext<{0}>({1}, {2}, {3}, {4})'.format(self.id, self.__file_deps_fragment, self.__file_deps_page, self.__uptodate_deps_fragment, self.__uptodate_deps_page)
def provide_link(self, reference):
"""Resolve link to reference. Returns pair (URL, label)."""
idx = reference.find('::')
if idx < 0:
return '#{0}'.format(reference), reference
else:
site, label = reference[:idx], reference[idx + len('::'):]
for link_provider in self.__link_providers:
result = link_provider.provide_link(site, label, self.lang)
if result is not None:
return result
raise Exception("Cannot provide link for site '{0}' with label '{1}'!".format(site, label))
class CompileLaTeX(nikola.plugin_categories.PageCompiler):
"""Compiles a subset of LaTeX into HTML."""
name = 'latex'
demote_headers = True
use_dep_files = False
def __init__(self):
"""Create page compiler object."""
super(CompileLaTeX, self).__init__()
self.__beautify = True
self.__parsing_environment = parser.ParsingEnvironment()
self.__link_providers = {}
def set_site(self, site):
"""Set Nikola site object."""
super(CompileLaTeX, self).set_site(site)
# Classify plugins
renderer_plugins = {}
other_plugins = {}
for plugin in self.get_compiler_extensions():
try:
if plugin.plugin_object.latex_plugin_type == 'formula_renderer':
LOGGER.debug('Found LaTeX formula renderer plugin {0}'.format(plugin.name))
renderer_plugins[plugin.name] = plugin
else:
LOGGER.warn('Found unknown LaTeX page compiler plugin {0}!'.format(plugin.name))
other_plugins[plugin.name] = plugin
plugin.plugin_object.initialize(self, self.__parsing_environment)
except Exception:
LOGGER.error('Found broken LaTeX page compiler plugin {0}!'.format(plugin.name))
# Look for formula renderer
renderer_name = site.config.get('LATEX_FORMULA_RENDERER', 'latex_formula_image_renderer')
if renderer_name not in renderer_plugins:
raise Exception("Unknown formula renderer '{}'!".format(renderer_name))
self.__formula_renderer = renderer_plugins[renderer_name].plugin_object
self.__formula_renderer_name = renderer_name
self.__plugins = list(other_plugins.values())
self.__all_plugins = [self.__formula_renderer] + self.__plugins
# Configure plugins
for plugin in self.__all_plugins:
plugin.initialize(self, self.__parsing_environment)
def _get_dep_filename(self, post, lang):
"""Retrieve dependency filename."""
return post.translated_base_path(lang) + '.ltxdep'
def get_extra_targets(self, post, lang, dest):
"""Retrieve extra targets generated by page compiler."""
result = [self._get_dep_filename(post, lang)]
for plugin in self.__all_plugins:
result += plugin.get_extra_targets(post, lang, dest)
return result
def _read_extra_deps(self, post, lang):
"""Read extra dependencies from JSON file."""
dep_path = self._get_dep_filename(post, lang)
if os.path.isfile(dep_path):
with io.open(dep_path, 'rb') as file:
result = json.loads(file.read().decode('utf-8'))
if isinstance(result, list) and len(result) == 4:
return result
return ([], [], [], [])
def _add_extra_deps(self, post, lang, what, where):
"""Return a list of extra dependencies for given post and language.
``what`` can be ``file`` or ``uptodate`` and describes what kind
of dependency can be added, and ``where`` can be ``fragment`` or
``page`` and describes where the dependency will be added.
"""
result = []
# Get dependencies from disk
# index into the 4-tuple written by _write_deps: fragment entries come first
idx = 0 if where == 'fragment' else 1
if what == 'uptodate':
for uptodate_data in self._read_extra_deps(post, lang)[2 + idx]:
result.append(nikola.utils.config_changed(uptodate_data['deps'], uptodate_data['name']))
else:
result.extend(self._read_extra_deps(post, lang)[0 + idx])
# Add own dependencies
if what == 'uptodate' and where == 'fragment':
result.append(nikola.utils.config_changed({
'formula_renderer': self.__formula_renderer_name,
'theorem_names': self._get_theorem_names(lang),
}, 'latex_page_compiler:config'))
# Add plugin dependencies
for plugin in self.__all_plugins:
result.extend(plugin.add_extra_deps(post, lang, what, where))
return result
def register_extra_dependencies(self, post):
"""Register extra dependencies extractor."""
def register(lang, where):
"""Create language- and where-dependent extractors."""
post.add_dependency(lambda: self._add_extra_deps(post, lang, 'file', where), where, lang=lang)
post.add_dependency_uptodate(lambda: self._add_extra_deps(post, lang, 'uptodate', where), True, where, lang=lang)
for lang in self.site.config['TRANSLATIONS']:
for where in ['fragment', 'page']:
register(lang, where)
def get_parsing_environment(self):
"""Retrieve parsing environment. See ``parser.ParsingEnvironment`` for documentation."""
return self.__parsing_environment
def _format_data(self, data, latex_context):
"""Parse and HTMLify data from string, given LaTeX context."""
tree = parser.parse(data, self.__parsing_environment, filename=latex_context.name)
result = htmlify.HTMLify(tree, self.__formula_renderer, latex_context, beautify=self.__beautify, outer_indent=0)
for plugin in self.__all_plugins:
result = plugin.modify_html_output(result, latex_context)
return result
def _get_theorem_names(self, lang):
"""Get language-dependent theorem environment names from messages."""
thm_names = {}
for name in ['thm_name', 'prop_name', 'cor_name', 'lemma_name', 'def_name', 'defs_name', 'proof_name', 'example_name', 'examples_name', 'remark_name', 'remarks_name']:
thm_names[name] = self.site.MESSAGES('math_{0}'.format(name), lang)
return thm_names
def _compile_string_impl(self, data, source_path=None, is_two_file=True, post=None, lang=None, link_providers=None):
"""Compile to string implementation."""
try:
if not is_two_file:
data = re.split('(\n\n|\r\n\r\n)', data, maxsplit=1)[-1]
latex_context = LaTeXContext(hash(data), lang=lang, thm_names=self._get_theorem_names(lang), name=source_path)
if link_providers:
for link_provider in link_providers:
latex_context.add_link_provider(link_provider)
for plugin in self.__all_plugins:
plugin.before_processing(latex_context, source_path, post)
output = self._format_data(data, latex_context)
for plugin in self.__all_plugins:
plugin.after_processing(latex_context, source_path, post)
return (output, latex_context, []) # last part are shortcode dependencies
except Exception:
import traceback
traceback.print_exc()
# re-raise with the original traceback intact
raise
def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None):
"""Compile the source file into HTML strings."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
return self._compile_string_impl(data, source_path=source_path, is_two_file=is_two_file, post=post, lang=lang)
def compile_to_string(self, source_data, name=None):
"""Old, deprecated interface."""
return self._compile_string_impl(source_data, source_path=name, lang=nikola.utils.LocaleBorg().current_lang)[0]
def _write_deps(self, latex_context, deps_path):
"""Write dependencies into JSON file."""
data = (latex_context.get_file_dependencies_fragment(), latex_context.get_file_dependencies_page(),
latex_context.get_uptodate_dependencies_fragment(), latex_context.get_uptodate_dependencies_page())
with io.open(deps_path, 'wb') as file:
file.write(json.dumps(data).encode('utf-8'))
def add_link_provider(self, source, link_provider):
"""Add link provider to plugin. Will be added to LaTeX contexts automatically."""
if source not in self.__link_providers:
self.__link_providers[source] = []
self.__link_providers[source].append(link_provider)
def compile(self, source, dest, is_two_file=True, post=None, lang=None):
"""Compile the source, save it on dest."""
nikola.utils.makedirs(os.path.dirname(dest))
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
try:
with io.open(dest, 'w+', encoding='utf8') as out_file:
with io.open(source, 'r', encoding='utf8') as in_file:
data = in_file.read()
output, latex_context, _ = self._compile_string_impl(data, source_path=source, is_two_file=is_two_file, post=post,
lang=lang, link_providers=self.__link_providers.get(source))
# Write post
out_file.write(output)
# Write dependencies
if post is None:
deps_path = dest + '.wpdep'
else:
deps_path = self._get_dep_filename(post, lang)
self._write_deps(latex_context, deps_path)
# Add dependencies and write formulae info
if post is not None:
for fn in latex_context.get_file_dependencies_fragment():
post.add_dependency(fn, add='fragment', lang=lang)
for fn in latex_context.get_file_dependencies_page():
post.add_dependency(fn, add='page', lang=lang)
for uptodate_data in latex_context.get_uptodate_dependencies_fragment():
post.add_dependency_uptodate(nikola.utils.config_changed(uptodate_data['deps'], uptodate_data['name']), add='fragment', lang=lang)
for uptodate_data in latex_context.get_uptodate_dependencies_page():
post.add_dependency_uptodate(nikola.utils.config_changed(uptodate_data['deps'], uptodate_data['name']), add='page', lang=lang)
for plugin in self.__all_plugins:
plugin.write_extra_targets(post, lang, dest, latex_context)
except Exception:
# If an exception was raised, remove output file and re-raise it
try:
os.unlink(dest)
except Exception:
pass
raise
def create_post(self, path, content=None, onefile=False, is_page=False, **kw):
"""Create empty post."""
metadata = {}
metadata.update(self.default_metadata)
metadata.update(kw)
nikola.utils.makedirs(os.path.dirname(path))
if content is None:
content = ''
if not content.endswith('\n'):
content += '\n'
with io.open(path, 'w+', encoding='utf8') as fd:
if onefile:
fd.write(nikola.utils.write_metadata(metadata))
fd.write('\n\n')
fd.write(content)
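# Illustrative sketch, not used by the plugin: the .ltxdep files written by
# _write_deps() hold a JSON array of four lists, in the order
# [file_deps_fragment, file_deps_page, uptodate_deps_fragment,
# uptodate_deps_page]. A standalone reader might look like this; the function
# name and usage are hypothetical.
def read_ltxdep(path):
    with io.open(path, 'rb') as f:
        return json.loads(f.read().decode('utf-8'))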
| {
"content_hash": "1692fae8ff58bbddc86f6b00d9cc6a1b",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 178,
"avg_line_length": 47.103857566765576,
"alnum_prop": 0.6021796648607787,
"repo_name": "getnikola/plugins",
"id": "baf9e3140dc912ad70478902bb7ca47146e4cd13",
"size": "17004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v7/latex/latex/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8729"
},
{
"name": "Emacs Lisp",
"bytes": "8804"
},
{
"name": "HTML",
"bytes": "2470"
},
{
"name": "JavaScript",
"bytes": "41087"
},
{
"name": "Python",
"bytes": "1157045"
},
{
"name": "TeX",
"bytes": "844"
}
],
"symlink_target": ""
} |
import unittest
from nosedep import depends
class DisplayNodeList(unittest.TestCase):
@depends(after='test_02')
def test_01(self):
pass
@depends(after='test_03')
def test_02(self):
pass
@depends(after='test_04')
def test_03(self):
pass
def test_04(self):
pass
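# With nosedep, @depends(after='test_NN') makes the decorated test run after
# test_NN, so the effective execution order of this suite is:
# test_04, test_03, test_02, test_01.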
| {
"content_hash": "6b8ec68fea58b841dd865247868ef1eb",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 41,
"avg_line_length": 17.105263157894736,
"alnum_prop": 0.6030769230769231,
"repo_name": "Zitrax/nose-dep",
"id": "e4a25cd0514f94c090e9e27d5be06fb2a338b541",
"size": "325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_scripts/dir_test/dir_test_2/dir_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41635"
}
],
"symlink_target": ""
} |
import ConfigParser
import os
from crontab import CronTab
import MySQLdb
import yum
import sys
import subprocess
import shutil
#This first section interprets the configuration file. It covers how to
# configure the variables defined below and convert them for use by the tests.
#Running this script will re-initialize the table, so if it already exists it will be deleted. A prompt will ask for your input before continuing.
#This section is used for the variables
config = ConfigParser.ConfigParser()
config.readfp(open('/etc/curl/curl.conf'))
#url_number = config.get('Section 1', 'url_number')
#url_number = int(url_number)
#tests_per_min = int(60/int(config.get('Section 1', 'seconds_per_test')))
#user_agent_string = config.get('Section 1', 'user_agent_string')
#wait_time = int(config.get('Section 1','seconds_per_test'))
test_script = open(config.get('Section 2', 'big_curl_script_location'), 'w')
curl_file = config.get('Section 2', 'curl_script_location')
csv_file = config.get('Section 2', 'curl_csv_output')
curl_csv_directory = config.get('Section 2', 'curl_csv_directory')
#curl_script_frequency = int(config.get('Section 3', 'curl_script_frequency'))
#db_script_frequency = int(config.get('Section 4', 'local_db_update'))
#cron_user = str(config.get('Section 3', 'cron_user'))
#remote_db_script_frequency = int(config.get('Section 5', 'remote_db_update'))
local_db_upload_file = config.get('Section 4', 'local_db_upload')
local_db_copy_file = config.get('Section 4', 'local_db_copy')
local_db_user= str(config.get('Section 4', 'local_database_un'))
local_db_password= str(config.get('Section 4', 'local_database_password'))
local_database_url_ip= str(config.get('Section 4', 'local_database_url_ip'))
local_database_name= str(config.get('Section 4', 'local_database_name'))
local_table_name= str(config.get('Section 4', 'local_table_name'))
remote_upload_file_location= config.get('Section 5', 'remote_upload_file_location')
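#Illustrative only: a minimal /etc/curl/curl.conf shape that satisfies the
#active config.get() calls above. Section names and keys match the code;
#the values are invented placeholders.
#
# [Section 2]
# big_curl_script_location = /opt/curl/big_curl.sh
# curl_script_location = /opt/curl/curl.sh
# curl_csv_output = /opt/curl/output.csv
# curl_csv_directory = /opt/curl/csv
#
# [Section 4]
# local_db_upload = /opt/curl/database_upload.py
# local_db_copy = /opt/curl/csv_copy.py
# local_database_un = curl_user
# local_database_password = secret
# local_database_url_ip = 127.0.0.1
# local_database_name = curl_tests
# local_table_name = curl_results
#
# [Section 5]
# remote_upload_file_location = /opt/curl/remote_database_upload.py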
# This section of the script installs the base software required to run the
# scripts and store the data locally.
# This includes making sure that a database exists, and installing one if not.
#This section checks whether a local database with the correct parameters is
#installed. If not, it will install the correct database.
#If the table you are trying to use for this initiation script exists, the table will be overwritten.
#If you would like to create a new table for this test, please change it in the config file.
try:
mydb = MySQLdb.connect(host=local_database_url_ip,
user=local_db_user,
passwd=local_db_password,
db=local_database_name)
cursor = mydb.cursor()
cursor.execute("Select table_name from information_schema.tables WHERE table_name = '" + local_table_name + "'")
table_exists_results = cursor.fetchone()
if table_exists_results:
print "Table Exists"
delete_question = raw_input("Would you like to continue? This will delete the table! (y/n): ")
if "y" not in delete_question:
sys.exit()
else:
cursor.execute("DROP TABLE" + local_table_name)
mydb.commit()
cursor.close()
else:
print "Table Doesn't Exists. Will attempt to create one"
except MySQLdb.Error:
print "Can't Connect to DB. Will attempt to install one"
#This section here is to run yum to install a new database. This will default to using mariadb
yb=yum.YumBase()
inst = yb.rpmdb.returnPackages()
installed=[x.name for x in inst]
packages=['mariadb']
for package in packages:
if package in installed:
print('{0} is already installed'.format(package))
else:
print('Installing {0}'.format(package))
kwarg = {
'name':package
}
yb.install(**kwarg)
yb.resolveDeps()
yb.buildTransaction()
yb.processTransaction()
start_db_command = ["/usr/bin/service mariadb start"]
persistent_db_command = ["chkconfig mariadb on"]
subprocess.call(start_db_command, shell=True)
subprocess.call(persistent_db_command, shell=True)
#This will pull us out of our loops. The next steps will be to create the table
#this is done by connecting to the local db creating the table with the required rows for the service
mydb = MySQLdb.connect(host=local_database_url_ip,
user=local_db_user,
passwd=local_db_password,
db=local_database_name)
cursor = mydb.cursor()
build_table= (
"Create Table " + local_table_name +
" (id int(10) NOT NULL auto_increment, "
"time_id int(10) NOT NULL, "
"date datetime NOT NULL, "
"url varchar(250) NOT NULL, "
"user_agent varchar(250) default NULL, "
"url_e varchar(250) NOT NULL, "
"http_code int(3) NOT NULL, "
"ip_port varchar(50) NOT NULL, "
"download_speed int(10) NOT NULL, "
"upload_speed int(10) NOT NULL, "
"dns_lookup_time decimal(6,3) NOT NULL, "
"tcp_connect_time decimal(6,3) NOT NULL, "
"ssl_connect_time decimal(6,3) NOT NULL, "
"pretransfer_time decimal(6,3) NOT NULL, "
"redirect_time decimal(6,3) NOT NULL, "
"starttransfer_time decimal(6,3) NOT NULL, "
"total_time decimal(6,3) NOT NULL, "
"PRIMARY KEY (id)"
)
cursor.execute(build_table)
#building the indexes for the local table
build_index1 = ("Create Index url ON " + local_table_name + "(url)")
cursor.execute(build_index1)
build_index2 = ("Create Index user_agent ON " + local_table_name + "(user_agent)")
cursor.execute(build_index2)
build_index3 = ("Create Index date ON " + local_table_name + "(date)")
cursor.execute(build_index3)
mydb.commit()
cursor.close()
#now we have a local db built. The next steps are to move the required files to their required locations
# the source filenames must be strings; they are assumed to live alongside this script
shutil.copyfile('csv_copy.py', local_db_copy_file)
shutil.copyfile('database_upload.py', local_db_upload_file)
shutil.copyfile('curl2.sh', curl_file)
shutil.copyfile('remote_database_upload.py', remote_upload_file_location)
#creating temp curl folder
os.makedirs(curl_csv_directory)
print "Please Run curltest12.py to start the inital load of testing"
exit
| {
"content_hash": "de3d8c8ad5f4a244dfc7b645da0c338e",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 143,
"avg_line_length": 38.436708860759495,
"alnum_prop": 0.7095340029639388,
"repo_name": "manhof/test_isp_curl",
"id": "3b469ab982ee566756fd996230ebd3e7fd16e5c8",
"size": "6092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "curltestconifgscript.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17847"
},
{
"name": "Shell",
"bytes": "27306"
}
],
"symlink_target": ""
} |
import asyncio
import json
import logging
import os
from async_generator import asynccontextmanager
from typing import Text, Union, Optional, AsyncGenerator
from rasa.shared.exceptions import RasaException, ConnectionException
import rasa.shared.utils.common
from rasa.core.constants import DEFAULT_LOCK_LIFETIME
from rasa.core.lock import TicketLock
from rasa.utils.endpoints import EndpointConfig
logger = logging.getLogger(__name__)
def _get_lock_lifetime() -> int:
return int(os.environ.get("TICKET_LOCK_LIFETIME", 0)) or DEFAULT_LOCK_LIFETIME
LOCK_LIFETIME = _get_lock_lifetime()
DEFAULT_SOCKET_TIMEOUT_IN_SECONDS = 10
DEFAULT_REDIS_LOCK_STORE_KEY_PREFIX = "lock:"
# noinspection PyUnresolvedReferences
class LockError(RasaException):
"""Exception that is raised when a lock cannot be acquired.
Attributes:
message (str): explanation of which `conversation_id` raised the error
"""
pass
class LockStore:
@staticmethod
def create(obj: Union["LockStore", EndpointConfig, None]) -> "LockStore":
"""Factory to create a lock store."""
if isinstance(obj, LockStore):
return obj
try:
return _create_from_endpoint_config(obj)
except ConnectionError as error:
raise ConnectionException("Cannot connect to lock store.") from error
@staticmethod
def create_lock(conversation_id: Text) -> TicketLock:
"""Create a new `TicketLock` for `conversation_id`."""
return TicketLock(conversation_id)
def get_lock(self, conversation_id: Text) -> Optional[TicketLock]:
"""Fetch lock for `conversation_id` from storage."""
raise NotImplementedError
def delete_lock(self, conversation_id: Text) -> None:
"""Delete lock for `conversation_id` from storage."""
raise NotImplementedError
def save_lock(self, lock: TicketLock) -> None:
"""Commit `lock` to storage."""
raise NotImplementedError
def issue_ticket(
self, conversation_id: Text, lock_lifetime: float = LOCK_LIFETIME
) -> int:
"""Issue new ticket with `lock_lifetime` for lock associated with
`conversation_id`.
Creates a new lock if none is found.
"""
logger.debug(f"Issuing ticket for conversation '{conversation_id}'.")
try:
lock = self.get_or_create_lock(conversation_id)
ticket = lock.issue_ticket(lock_lifetime)
self.save_lock(lock)
return ticket
except Exception as e:
raise LockError(f"Error while acquiring lock. Error:\n{e}")
@asynccontextmanager
async def lock(
self,
conversation_id: Text,
lock_lifetime: float = LOCK_LIFETIME,
wait_time_in_seconds: float = 1,
) -> AsyncGenerator[TicketLock, None]:
"""Acquire lock with lifetime `lock_lifetime`for `conversation_id`.
Try acquiring lock with a wait time of `wait_time_in_seconds` seconds
between attempts. Raise a `LockError` if lock has expired.
"""
ticket = self.issue_ticket(conversation_id, lock_lifetime)
try:
yield await self._acquire_lock(
conversation_id, ticket, wait_time_in_seconds
)
finally:
self.cleanup(conversation_id, ticket)
async def _acquire_lock(
self, conversation_id: Text, ticket: int, wait_time_in_seconds: float
) -> TicketLock:
logger.debug(f"Acquiring lock for conversation '{conversation_id}'.")
while True:
# fetch lock in every iteration because lock might no longer exist
lock = self.get_lock(conversation_id)
# exit loop if lock does not exist anymore (expired)
if not lock:
break
# acquire lock if it isn't locked
if not lock.is_locked(ticket):
logger.debug(f"Acquired lock for conversation '{conversation_id}'.")
return lock
items_before_this = ticket - (lock.now_serving or 0)
logger.debug(
f"Failed to acquire lock for conversation ID '{conversation_id}' "
f"because {items_before_this} other item(s) for this "
f"conversation ID have to be finished processing first. "
f"Retrying in {wait_time_in_seconds} seconds ..."
)
# sleep and update lock
await asyncio.sleep(wait_time_in_seconds)
self.update_lock(conversation_id)
raise LockError(
f"Could not acquire lock for conversation_id '{conversation_id}'."
)
def update_lock(self, conversation_id: Text) -> None:
"""Fetch lock for `conversation_id`, remove expired tickets and save lock."""
lock = self.get_lock(conversation_id)
if lock:
lock.remove_expired_tickets()
self.save_lock(lock)
def get_or_create_lock(self, conversation_id: Text) -> TicketLock:
"""Fetch existing lock for `conversation_id` or create a new one if
it doesn't exist."""
existing_lock = self.get_lock(conversation_id)
if existing_lock:
return existing_lock
return self.create_lock(conversation_id)
def is_someone_waiting(self, conversation_id: Text) -> bool:
"""Return whether someone is waiting for lock associated with
`conversation_id`."""
lock = self.get_lock(conversation_id)
if lock:
return lock.is_someone_waiting()
return False
def finish_serving(self, conversation_id: Text, ticket_number: int) -> None:
"""Finish serving ticket with `ticket_number` for `conversation_id`.
Removes ticket from lock and saves lock.
"""
lock = self.get_lock(conversation_id)
if lock:
lock.remove_ticket_for(ticket_number)
self.save_lock(lock)
def cleanup(self, conversation_id: Text, ticket_number: int) -> None:
"""Remove lock for `conversation_id` if no one is waiting."""
self.finish_serving(conversation_id, ticket_number)
if not self.is_someone_waiting(conversation_id):
self.delete_lock(conversation_id)
@staticmethod
def _log_deletion(conversation_id: Text, deletion_successful: bool) -> None:
if deletion_successful:
logger.debug(f"Deleted lock for conversation '{conversation_id}'.")
else:
logger.debug(f"Could not delete lock for conversation '{conversation_id}'.")
class RedisLockStore(LockStore):
"""Redis store for ticket locks."""
def __init__(
self,
host: Text = "localhost",
port: int = 6379,
db: int = 1,
password: Optional[Text] = None,
use_ssl: bool = False,
key_prefix: Optional[Text] = None,
socket_timeout: float = DEFAULT_SOCKET_TIMEOUT_IN_SECONDS,
) -> None:
"""Create a lock store which uses Redis for persistence.
Args:
host: The host of the redis server.
port: The port of the redis server.
db: The name of the database within Redis which should be used by Rasa
Open Source.
password: The password which should be used for authentication with the
Redis database.
use_ssl: `True` if SSL should be used for the connection to Redis.
key_prefix: prefix to prepend to all keys used by the lock store. Must be
alphanumeric.
socket_timeout: Timeout in seconds after which an exception will be raised
in case Redis doesn't respond within `socket_timeout` seconds.
"""
import redis
self.red = redis.StrictRedis(
host=host,
port=int(port),
db=int(db),
password=password,
ssl=use_ssl,
socket_timeout=socket_timeout,
)
self.key_prefix = DEFAULT_REDIS_LOCK_STORE_KEY_PREFIX
if key_prefix:
logger.debug(f"Setting non-default redis key prefix: '{key_prefix}'.")
self._set_key_prefix(key_prefix)
super().__init__()
def _set_key_prefix(self, key_prefix: Text) -> None:
if isinstance(key_prefix, str) and key_prefix.isalnum():
self.key_prefix = key_prefix + ":" + DEFAULT_REDIS_LOCK_STORE_KEY_PREFIX
else:
logger.warning(
f"Omitting provided non-alphanumeric redis key prefix: '{key_prefix}'. "
f"Using default '{self.key_prefix}' instead."
)
def get_lock(self, conversation_id: Text) -> Optional[TicketLock]:
"""Retrieves lock (see parent docstring for more information)."""
serialised_lock = self.red.get(self.key_prefix + conversation_id)
if serialised_lock:
return TicketLock.from_dict(json.loads(serialised_lock))
return None
def delete_lock(self, conversation_id: Text) -> None:
"""Deletes lock for conversation ID."""
deletion_successful = self.red.delete(self.key_prefix + conversation_id)
self._log_deletion(conversation_id, deletion_successful)
def save_lock(self, lock: TicketLock) -> None:
self.red.set(self.key_prefix + lock.conversation_id, lock.dumps())
class InMemoryLockStore(LockStore):
"""In-memory store for ticket locks."""
def __init__(self) -> None:
self.conversation_locks = {}
super().__init__()
def get_lock(self, conversation_id: Text) -> Optional[TicketLock]:
return self.conversation_locks.get(conversation_id)
def delete_lock(self, conversation_id: Text) -> None:
deleted_lock = self.conversation_locks.pop(conversation_id, None)
self._log_deletion(
conversation_id, deletion_successful=deleted_lock is not None
)
def save_lock(self, lock: TicketLock) -> None:
self.conversation_locks[lock.conversation_id] = lock
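# Illustrative sketch, not part of this module: callers serialize message
# handling per conversation with the async ``lock`` context manager defined
# on LockStore above. The coroutine below is hypothetical.
async def handle_serialized(lock_store: LockStore, conversation_id: Text) -> None:
    async with lock_store.lock(conversation_id) as ticket_lock:
        # Only one ticket holder per conversation gets here at a time; other
        # callers queue on their tickets until served or the lock expires.
        logger.debug(f"Processing '{ticket_lock.conversation_id}' exclusively.")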
def _create_from_endpoint_config(
endpoint_config: Optional[EndpointConfig] = None,
) -> "LockStore":
"""Given an endpoint configuration, create a proper `LockStore` object."""
if (
endpoint_config is None
or endpoint_config.type is None
or endpoint_config.type == "in_memory"
):
# this is the default type if no lock store type is set
lock_store = InMemoryLockStore()
elif endpoint_config.type == "redis":
lock_store = RedisLockStore(host=endpoint_config.url, **endpoint_config.kwargs)
else:
lock_store = _load_from_module_name_in_endpoint_config(endpoint_config)
logger.debug(f"Connected to lock store '{lock_store.__class__.__name__}'.")
return lock_store
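# Illustrative endpoint configuration (YAML, shown as a comment) that would
# select the Redis store above; the keys mirror the EndpointConfig attributes
# consumed by _create_from_endpoint_config(), and the values are placeholders:
#
#   lock_store:
#     type: redis
#     url: localhost
#     port: 6379
#     db: 1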
def _load_from_module_name_in_endpoint_config(
endpoint_config: EndpointConfig,
) -> "LockStore":
"""Retrieve a `LockStore` based on its class name."""
try:
lock_store_class = rasa.shared.utils.common.class_from_module_path(
endpoint_config.type
)
return lock_store_class(endpoint_config=endpoint_config)
except (AttributeError, ImportError) as e:
raise Exception(
f"Could not find a class based on the module path "
f"'{endpoint_config.type}'. Failed to create a `LockStore` "
f"instance. Error: {e}"
)
| {
"content_hash": "68953699f0cb618e6ad7c937a0f50ca7",
"timestamp": "",
"source": "github",
"line_count": 331,
"max_line_length": 88,
"avg_line_length": 34.438066465256796,
"alnum_prop": 0.6241775594350382,
"repo_name": "RasaHQ/rasa_nlu",
"id": "ac78da3df30f95b31fdb4a80114efd1c68bd6067",
"size": "11399",
"binary": false,
"copies": "1",
"ref": "refs/heads/emptystring_10504",
"path": "rasa/core/lock_store.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "3462"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "Python",
"bytes": "1467067"
},
{
"name": "Shell",
"bytes": "941"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
with open('requirements.txt') as f_in:
lines = (l.strip() for l in f_in.readlines())
install_requires = [l for l in lines if l and not l.startswith('--')]
with open('README.md') as f_in:
long_description = f_in.read()
setup(
name='fangorn',
version='0.0.1',
description='Slackbot for personal use',
long_description=long_description,
url='https://github.com/lwbrooke/slackbot',
license='Apache',
author='Logan Brooke',
packages=find_packages(),
package_data={
'fangorn': ['config_files/*.yaml']
},
entry_points={
'console_scripts': [
'fangorn = fangorn.__main__:main'
]
},
install_requires=install_requires,
setup_requires=[
'wheel'
]
)
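# Illustrative only: given a requirements.txt such as
#
#   --index-url https://pypi.org/simple
#   slackclient
#   pyyaml
#
# the filtering above keeps ['slackclient', 'pyyaml'], dropping blank lines
# and '--'-prefixed pip options, which install_requires cannot consume.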
| {
"content_hash": "bf7f7e698bf83b0f6975a7ab205d43f2",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 73,
"avg_line_length": 25.806451612903224,
"alnum_prop": 0.605,
"repo_name": "lwbrooke/slackbot",
"id": "afcfde367eacc94a5affd6297cdb52fa676cba68",
"size": "800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "274"
},
{
"name": "Python",
"bytes": "23758"
}
],
"symlink_target": ""
} |
"""
Human Resource Management
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
s3db.hrm_vars()
# =============================================================================
def index():
""" Module Home Page """
mode = session.s3.hrm.mode
if mode is not None:
# Go to Personal Profile
redirect(URL(f="person"))
else:
# Bypass home page & go direct to searchable list of Staff
redirect(URL(f="staff", args="search"))
# =============================================================================
# People
# =============================================================================
def human_resource():
"""
HR Controller
- combined (unused, except for Imports)
"""
tablename = "hrm_human_resource"
table = s3db[tablename]
# Default to Staff
_type = table.type
s3.filter = (_type == 1)
def prep(r):
if r.method == "form":
return True
if r.interactive:
if r.method == "create" and not r.component:
redirect(URL(f="volunteer",
args=request.args,
vars=request.vars))
elif r.method == "delete":
# Don't redirect
pass
elif r.id:
# Redirect to person controller
vars = {
"human_resource.id" : r.id,
"group" : "staff"
}
redirect(URL(f="person",
vars=vars))
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
if not r.component:
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
s3_action_buttons(r, deletable=settings.get_hrm_deletable())
if "msg" in settings.modules and \
auth.permission.has_permission("update", c="hrm", f="compose"):
# @ToDo: Remove this now that we have it in Events?
s3.actions.append({
"url": URL(f="compose",
vars = {"hrm_id": "[id]"}),
"_class": "action-btn",
"label": str(T("Send Message"))})
elif r.representation == "plain" and \
r.method !="search":
# Map Popups
output = s3db.hrm_map_popup(r)
return output
s3.postp = postp
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def staff():
"""
Staff Controller
"""
tablename = "hrm_human_resource"
table = s3db[tablename]
_type = table.type
_type.default = 1
s3.filter = (_type == 1)
table.site_id.writable = True
table.site_id.readable = True
list_fields = ["id",
"person_id",
"job_title_id",
"organisation_id",
"department",
"site_id",
#"site_contact",
(T("Email"), "email"),
(settings.get_ui_label_mobile_phone(), "phone"),
(T("Trainings"), "course"),
(T("Certificates"), "certificate"),
(T("Contract End Date"), "end_date"),
"status",
]
s3.crud_strings[tablename] = s3.crud_strings["hrm_staff"]
if "expiring" in request.get_vars:
s3.filter = s3.filter & \
(table.end_date < (request.utcnow + datetime.timedelta(weeks=4)))
s3.crud_strings[tablename].title_list = T("Staff with Contracts Expiring in the next Month")
# Remove the big Add button
s3db.configure(tablename,
insertable=False)
# Remove Type filter from the Search widget
human_resource_search = s3db.get_config(tablename,
"search_method")
human_resource_search.advanced.pop(1)
s3db.configure(tablename,
list_fields = list_fields,
search_method = human_resource_search)
def prep(r):
if r.interactive:
if not r.component and \
not r.id and \
r.method in [None, "create"]:
# Don't redirect
# Assume staff only between 16-81
s3db.pr_person.date_of_birth.widget = S3DateWidget(past=972, future=-192)
table = r.table
table.site_id.comment = DIV(DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Office/Warehouse/Facility"),
T("The facility where this position is based."),
T("Enter some characters to bring up a list of possible matches."))))
table.status.writable = False
table.status.readable = False
elif r.method == "delete":
# Don't redirect
pass
elif r.id:
# Redirect to person controller
vars = {
"human_resource.id": r.id,
"group": "staff"
}
redirect(URL(f="person",
vars=vars))
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
if not r.component:
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
s3_action_buttons(r, deletable=settings.get_hrm_deletable())
if "msg" in settings.modules and \
auth.permission.has_permission("update", c="hrm", f="compose"):
# @ToDo: Remove this now that we have it in Events?
s3.actions.append({
"url": URL(f="compose",
vars = {"hrm_id": "[id]"}),
"_class": "action-btn",
"label": str(T("Send Message"))
})
elif r.representation == "plain" and \
r.method !="search":
# Map Popups
output = s3db.hrm_map_popup(r)
return output
s3.postp = postp
output = s3_rest_controller("hrm", "human_resource")
return output
# -----------------------------------------------------------------------------
def person():
"""
Person Controller
- used for Personal Profile & Imports
- includes components relevant to HRM
"""
configure = s3db.configure
set_method = s3db.set_method
# Custom Method for Contacts
set_method("pr", resourcename,
method="contacts",
action=s3db.pr_contacts)
# Plug-in role matrix for Admins/OrgAdmins
realms = auth.user is not None and auth.user.realms or []
if ADMIN in realms or ORG_ADMIN in realms:
set_method("pr", resourcename, method="roles",
action=s3base.S3PersonRoleManager())
if settings.has_module("asset"):
# Assets as component of people
s3db.add_component("asset_asset",
pr_person="assigned_to_id")
# Edits should always happen via the Asset Log
# @ToDo: Allow this method too, if we can do so safely
configure("asset_asset",
insertable = False,
editable = False,
deletable = False)
group = request.get_vars.get("group", "staff")
hr_id = request.get_vars.get("human_resource.id", None)
if not str(hr_id).isdigit():
hr_id = None
# Configure human resource table
tablename = "hrm_human_resource"
table = s3db[tablename]
table.type.default = 1
request.get_vars.update(xsltmode="staff")
if hr_id:
hr = table[hr_id]
if hr:
group = hr.type == 2 and "volunteer" or "staff"
# Also inform the back-end of this finding
request.get_vars["group"] = group
# Configure person table
tablename = "pr_person"
table = s3db[tablename]
if (group == "staff" and settings.get_hrm_staff_experience() == "programme") or \
(group == "volunteer" and settings.get_hrm_vol_experience() == "programme"):
table.virtualfields.append(s3db.hrm_programme_person_virtual_fields())
configure(tablename,
deletable=False)
mode = session.s3.hrm.mode
if mode is not None:
# Configure for personal mode
s3.crud_strings[tablename].update(
title_display = T("Personal Profile"),
title_update = T("Personal Profile"))
# People can view their own HR data, but not edit it
configure("hrm_human_resource",
insertable = False,
editable = False,
deletable = False)
configure("hrm_certification",
insertable = True,
editable = True,
deletable = True)
configure("hrm_credential",
insertable = False,
editable = False,
deletable = False)
configure("hrm_competency",
insertable = True, # Can add unconfirmed
editable = False,
deletable = False)
configure("hrm_training", # Can add but not provide grade
insertable = True,
editable = False,
deletable = False)
configure("hrm_experience",
insertable = False,
editable = False,
deletable = False)
configure("pr_group_membership",
insertable = False,
editable = False,
deletable = False)
else:
# Configure for HR manager mode
s3.crud_strings[tablename].update(
title_upload = T("Import Staff"),
title_display = T("Staff Member Details"),
title_update = T("Staff Member Details")
)
# Upload for configuration (add replace option)
s3.importerPrep = lambda: dict(ReplaceOption=T("Remove existing data before import"))
# Import pre-process
def import_prep(data, group=group):
"""
Deletes all HR records (of the given group) of the organisation
before processing a new data import, used for the import_prep
hook in s3mgr
"""
resource, tree = data
xml = current.xml
tag = xml.TAG
att = xml.ATTRIBUTE
if s3.import_replace:
if tree is not None:
if group == "staff":
group = 1
elif group == "volunteer":
group = 2
else:
return # don't delete if no group specified
root = tree.getroot()
expr = "/%s/%s[@%s='org_organisation']/%s[@%s='name']" % \
(tag.root, tag.resource, att.name, tag.data, att.field)
orgs = root.xpath(expr)
for org in orgs:
org_name = org.get("value", None) or org.text
if org_name:
try:
org_name = json.loads(xml.xml_decode(org_name))
except:
pass
if org_name:
htable = s3db.hrm_human_resource
otable = s3db.org_organisation
query = (otable.name == org_name) & \
(htable.organisation_id == otable.id) & \
(htable.type == group)
resource = s3db.resource("hrm_human_resource", filter=query)
ondelete = s3db.get_config("hrm_human_resource", "ondelete")
resource.delete(ondelete=ondelete, format="xml", cascade=True)
s3mgr.import_prep = import_prep
# CRUD pre-process
def prep(r):
if r.representation == "s3json":
s3mgr.show_ids = True
elif r.interactive and r.method != "import":
if r.component:
if r.component_name == "human_resource":
table = r.component.table
table.site_id.writable = True
table.site_id.readable = True
org = session.s3.hrm.org
if org is not None:
table.organisation_id.default = org
table.organisation_id.comment = None
table.organisation_id.readable = False
table.organisation_id.writable = False
table.site_id.requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"org_site.%s" % s3db.super_key(db.org_site),
s3db.org_site_represent,
filterby="organisation_id",
filter_opts=[session.s3.hrm.org]))
elif r.component_name == "physical_description":
# Hide all but those details that we want
# Lock all the fields
table = r.component.table
for field in table.fields:
table[field].writable = False
table[field].readable = False
# Now enable those that we want
table.ethnicity.writable = True
table.ethnicity.readable = True
table.blood_type.writable = True
table.blood_type.readable = True
table.medical_conditions.writable = True
table.medical_conditions.readable = True
table.other_details.writable = True
table.other_details.readable = True
elif r.component_name == "asset":
# Edits should always happen via the Asset Log
# @ToDo: Allow this method too, if we can do so safely
configure("asset_asset",
insertable = False,
editable = False,
deletable = False)
elif r.method == "contacts":
#s3.js_global.append('''controller="hrm"''')
pass
else:
table = r.table
# No point showing the 'Occupation' field - that's the Job Title in the Staff Record
table.occupation.readable = False
table.occupation.writable = False
table.pe_label.readable = False
table.pe_label.writable = False
table.missing.readable = False
table.missing.writable = False
table.age_group.readable = False
table.age_group.writable = False
# Assume volunteers only between 12-81
table.date_of_birth.widget = S3DateWidget(past=972, future=-144)
resource = r.resource
if mode is not None:
r.resource.build_query(id=s3_logged_in_person())
else:
if not r.id and not hr_id:
# pre-action redirect => must retain prior errors
if response.error:
session.error = response.error
redirect(URL(r=r, f="staff"))
if resource.count() == 1:
resource.load()
r.record = resource.records().first()
if r.record:
r.id = r.record.id
if not r.record:
session.error = T("Record not found")
redirect(URL(f="staff",
args=["search"]))
if hr_id and r.component_name == "human_resource":
r.component_id = hr_id
configure("hrm_human_resource",
insertable = False)
#if not r.component_id or r.method in ("create", "update"):
# s3base.s3_address_hide(s3db.pr_address)
return True
s3.prep = prep
# CRUD post-process
def postp(r, output):
if r.interactive and r.component:
if r.component_name == "human_resource":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
if r.component_name == "experience":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_experience_start_date','hrm_experience_end_date')''')
elif r.component_name == "asset":
# Provide a link to assign a new Asset
# @ToDo: Proper Widget to do this inline
output["add_btn"] = A(T("Assign Asset"),
_href=URL(c="asset", f="asset"),
_id="add-btn",
_class="action-btn")
return output
s3.postp = postp
# REST Interface
if session.s3.hrm.orgname and mode is None:
orgname = session.s3.hrm.orgname
else:
orgname = None
output = s3_rest_controller("pr", resourcename,
native=False,
rheader=s3db.hrm_rheader,
orgname=orgname,
replace_option=T("Remove existing data before import"),
csv_extra_fields=[
dict(label="Type",
field=s3db.hrm_human_resource.type)
],
)
return output
# -----------------------------------------------------------------------------
def profile():
"""
Profile Controller
- includes components relevant to HRM
"""
request.args = [str(s3_logged_in_person())]
# Custom Method for Contacts
s3db.set_method("pr", resourcename,
method="contacts",
action=s3db.pr_contacts)
if settings.has_module("asset"):
# Assets as component of people
s3db.add_component("asset_asset",
pr_person="assigned_to_id")
group = request.get_vars.get("group", "staff")
# Configure human resource table
tablename = "hrm_human_resource"
table = s3db[tablename]
table.type.default = 1
# Configure person table
tablename = "pr_person"
table = s3db[tablename]
if (group == "staff" and settings.get_hrm_staff_experience() == "programme") or \
(group == "volunteer" and settings.get_hrm_vol_experience() == "programme"):
table.virtualfields.append(s3db.hrm_programme_person_virtual_fields())
s3db.configure(tablename,
deletable=False)
# Configure for personal mode
s3.crud_strings[tablename].update(
title_display = T("Personal Profile"),
title_update = T("Personal Profile"))
# CRUD pre-process
def prep(r):
if r.interactive and r.method != "import":
if r.component:
if r.component_name == "physical_description":
# Hide all but those details that we want
# Lock all the fields
table = r.component.table
for field in table.fields:
table[field].writable = False
table[field].readable = False
# Now enable those that we want
table.ethnicity.writable = True
table.ethnicity.readable = True
table.blood_type.writable = True
table.blood_type.readable = True
table.medical_conditions.writable = True
table.medical_conditions.readable = True
table.other_details.writable = True
table.other_details.readable = True
else:
table = r.table
# No point showing the 'Occupation' field - that's the Job Title in the Staff Record
table.occupation.readable = False
table.occupation.writable = False
table.pe_label.readable = False
table.pe_label.writable = False
table.missing.readable = False
table.missing.writable = False
table.age_group.readable = False
table.age_group.writable = False
# Assume volunteers only between 12-81
table.date_of_birth.widget = S3DateWidget(past=972, future=-144)
return True
else:
# Disable non-interactive & import
return False
s3.prep = prep
# CRUD post-process
def postp(r, output):
if r.interactive and r.component:
if r.component_name == "human_resource":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
if r.component_name == "experience":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_experience_start_date','hrm_experience_end_date')''')
return output
s3.postp = postp
output = s3_rest_controller("pr", "person",
native=False,
rheader=s3db.hrm_rheader,
)
return output
# -----------------------------------------------------------------------------
def person_search():
"""
Person REST controller
- limited to just search.json for use in Autocompletes
- allows differential access permissions
"""
group = request.get_vars.get("group", None)
if group == "staff":
s3.filter = (s3db.hrm_human_resource.type == 1)
elif group == "volunteer":
s3.filter = (s3db.hrm_human_resource.type == 2)
s3db.configure("hrm_human_resource",
# S3HRSearch
search_method = s3db.hrm_autocomplete_search,
)
s3.prep = lambda r: r.representation == "json" and \
r.method == "search"
return s3_rest_controller("hrm", "human_resource")
# =============================================================================
# Teams
# =============================================================================
def group():
"""
Team controller
- uses the group table from PR
"""
tablename = "pr_group"
table = s3db[tablename]
_group_type = table.group_type
_group_type.label = T("Team Type")
table.description.label = T("Team Description")
table.name.label = T("Team Name")
mtable = s3db.pr_group_membership
mtable.group_id.label = T("Team ID")
mtable.group_head.label = T("Team Leader")
# Set Defaults
_group_type.default = 3 # 'Relief Team'
_group_type.readable = _group_type.writable = False
# Only show Relief Teams
# Do not show system groups
s3.filter = (table.system == False) & \
(_group_type == 3)
# CRUD Strings
ADD_TEAM = T("Add Team")
s3.crud_strings[tablename] = Storage(
title_create = ADD_TEAM,
title_display = T("Team Details"),
title_list = T("Teams"),
title_update = T("Edit Team"),
title_search = T("Search Teams"),
subtitle_create = T("Add New Team"),
label_list_button = T("List Teams"),
label_create_button = T("Add New Team"),
label_search_button = T("Search Teams"),
msg_record_created = T("Team added"),
msg_record_modified = T("Team updated"),
msg_record_deleted = T("Team deleted"),
msg_list_empty = T("No Teams currently registered"))
s3.crud_strings["pr_group_membership"] = Storage(
title_create = T("Add Member"),
title_display = T("Membership Details"),
title_list = T("Team Members"),
title_update = T("Edit Membership"),
title_search = T("Search Member"),
subtitle_create = T("Add New Member"),
label_list_button = T("List Members"),
label_create_button = T("Add Team Member"),
label_delete_button = T("Delete Membership"),
msg_record_created = T("Team Member added"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Membership deleted"),
msg_list_empty = T("No Members currently registered"))
s3db.configure(tablename, main="name", extra="description",
# Redirect to member list when a new group has been created
create_next = URL(f="group",
args=["[id]", "group_membership"]))
s3db.configure("pr_group_membership",
list_fields=["id",
"person_id",
"group_head",
"description"])
# Post-process
def postp(r, output):
if r.interactive:
if not r.component:
update_url = URL(args=["[id]", "group_membership"])
s3_action_buttons(r, deletable=False, update_url=update_url)
if "msg" in settings.modules and \
auth.permission.has_permission("update", c="hrm", f="compose"):
s3.actions.append({
"url": URL(f="compose",
vars = {"group_id": "[id]"}),
"_class": "action-btn",
"label": str(T("Send Notification"))})
return output
s3.postp = postp
tabs = [
(T("Team Details"), None),
# Team should be contacted either via the Leader or
# simply by sending a message to the group as a whole.
#(T("Contact Data"), "contact"),
(T("Members"), "group_membership")
]
output = s3_rest_controller("pr", resourcename,
rheader=lambda r: s3db.pr_rheader(r, tabs=tabs))
return output
# =============================================================================
# Jobs
# =============================================================================
def job_role():
""" Job Roles Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
return True
s3.prep = prep
output = s3_rest_controller()
return output
def job_title():
""" Job Titles Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
return True
s3.prep = prep
output = s3_rest_controller()
return output
# =============================================================================
# Skills
# =============================================================================
def skill():
""" Skills Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def skill_type():
""" Skill Types Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def competency_rating():
""" Competency Rating for Skill Types Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def skill_provision():
""" Skill Provisions Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def course():
""" Courses Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller(rheader=s3db.hrm_rheader)
return output
# -----------------------------------------------------------------------------
def course_certificate():
""" Courses to Certificates Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def certificate():
""" Certificates Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
return True
s3.prep = prep
output = s3_rest_controller(rheader=s3db.hrm_rheader)
return output
# -----------------------------------------------------------------------------
def certificate_skill():
""" Certificates to Skills Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def training():
""" Training Controller - used for Searching for Participants """
return s3db.hrm_training_controller()
# -----------------------------------------------------------------------------
def training_event():
""" Training Events Controller """
return s3db.hrm_training_event_controller()
# =============================================================================
def skill_competencies():
"""
Called by S3FilterFieldChange to provide the competency options for a
particular Skill Type
"""
table = s3db.hrm_skill
ttable = s3db.hrm_skill_type
rtable = s3db.hrm_competency_rating
query = (table.id == request.args[0]) & \
(table.skill_type_id == ttable.id) & \
(rtable.skill_type_id == table.skill_type_id)
records = db(query).select(rtable.id,
rtable.name,
orderby=~rtable.priority)
response.headers["Content-Type"] = "application/json"
return records.json()
# =============================================================================
def staff_org_site_json():
"""
Used by the Asset - Assign to Person page
"""
table = s3db.hrm_human_resource
otable = s3db.org_organisation
#db.req_commit.date.represent = lambda dt: dt[:10]
query = (table.person_id == request.args[0]) & \
(table.organisation_id == otable.id)
records = db(query).select(table.site_id,
otable.id,
otable.name)
response.headers["Content-Type"] = "application/json"
return records.json()
# =============================================================================
# Messaging
# =============================================================================
def compose():
""" Send message to people/teams """
return s3db.hrm_compose()
# END =========================================================================
| {
"content_hash": "b0d047d0a33f185fbd7eb1ed737143ad",
"timestamp": "",
"source": "github",
"line_count": 878,
"max_line_length": 138,
"avg_line_length": 37.50227790432802,
"alnum_prop": 0.48306860631093024,
"repo_name": "smeissner/eden",
"id": "b0fd452987582d0b38d1807f2114656a9549bcd6",
"size": "32952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/hrm.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
from django.db import connection
from bluebottle.clients.utils import LocalTenant
def update_time_spent(apps, schema_editor):
TaskMember = apps.get_model('tasks', "TaskMember")
with LocalTenant(connection.tenant):
for tm in TaskMember.objects.filter(status='realized'):
if tm.task and tm.task.time_needed:
tm.time_spent = tm.task.time_needed
tm.save()
class Migration(migrations.Migration):
dependencies = [
('tasks', '0002_auto_20160614_1354'),
]
operations = [
migrations.RunPython(update_time_spent),
]
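# Note: as written this data migration has no reverse step. If reversibility
# is needed, a common pattern is a no-op reverse callable, e.g.
# migrations.RunPython(update_time_spent, migrations.RunPython.noop).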
| {
"content_hash": "5c7ae1b69be121b86624d53ad7fe6490",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 67,
"avg_line_length": 27.52,
"alnum_prop": 0.6540697674418605,
"repo_name": "onepercentclub/bluebottle",
"id": "b9555fd6044e0fedc3f7ee47b6704b5e4dcbe0ab",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/tasks/migrations/0003_auto_20160621_1707.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
} |
__author__ = 'alandinneen'
from MySQLdb import connect, Error
from collections import OrderedDict
class DBConn(object):
"""
A class to handle all MySQL connection reads/writes.
"""
def __init__(self, host=None, db=None, user=None, password=None):
self._host = host
self._db = db
self._user = user
self._pass = password
def connect(self):
"""
Return a connect object to MySQL database
"""
db = connect(self._host, self._user, self._pass, self._db)
return db
def close(self, conobj):
"""
Close connection to MySQL database
"""
conobj.close()
return None
def select(self, sqlselect):
"""
Simple method for performing MySQL select.
"""
try:
db = self.connect()
cursor = db.cursor()
cursor.execute(sqlselect)
rawdata = cursor.fetchall()
cursor.close()
db.close()
return rawdata
        except Error as e:
            print "There has been an error in the select! " + str(e)
def single_insert(self, sqlinsert):
"""
Performs a single insert and returns the last inserted id for the session.
"""
try:
insertid = None
db = self.connect()
cursor = db.cursor()
cursor.execute(sqlinsert)
cursor.execute("SELECT LAST_INSERT_ID();")
rowid = cursor.fetchall()
insertid = rowid[0][0]
cursor.close()
db.commit()
return insertid
except Exception as e:
db.rollback()
print "There has been an error in the single insert. The transaction has been rolled back. Error: " + e
finally:
db.close()
def mass_insert(self, sqlstatment, dbobj):
"""
Provides a mass insert mechanism. This method needs to be provided a self.connect() object before calling
this method. It should always be followed with a call to self.close()
"""
try:
cursor = dbobj.cursor()
cursor.execute(sqlstatment)
except Exception as e:
dbobj.rollback()
print "There has been an error in the single insert. The transaction has been rolled back. Error: " + e
def update(self, sqlupdate):
"""
Opens a MySQL connection, performs an update, then closes the connection. Rolls back any changes if an
error occurs.
"""
try:
db = self.connect()
cursor = db.cursor()
cursor.execute(sqlupdate)
cursor.close()
db.commit()
except Exception as e:
db.rollback()
print "There has been an error in the single insert. The transaction has been rolled back. Error: " + e
finally:
db.close()
def cursor_results_to_dict(self, results):
"""
Returns a list of OrderedDicts from the cursor results
"""
data = []
if results.rowcount:
keys = results.keys()
for row in results:
obj = OrderedDict()
for key in keys:
obj[key] = str(row[key]).decode('UTF-8', 'ignore')
data.append(obj)
        return data
| {
"content_hash": "1fe015a3f41b63a1bc111b5740c80af6",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 115,
"avg_line_length": 30.954545454545453,
"alnum_prop": 0.5380323054331865,
"repo_name": "rad08d/mysqlconn",
"id": "216c07714d35590bc3517a1bba6dd85ddf42d1a7",
"size": "3405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbconn/dbconn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3674"
}
],
"symlink_target": ""
} |
'''
Share commands
'''
import json
import qumulo.lib.opts
import qumulo.lib.util
import qumulo.rest.nfs as nfs
import qumulo.rest.users as users
from qumulo.rest.nfs import NFSRestriction
def convert_nfs_user_mapping(name):
convert = {
'none': 'NFS_MAP_NONE',
'root': 'NFS_MAP_ROOT',
'all': 'NFS_MAP_ALL',
'nfs_map_none': 'NFS_MAP_NONE',
'nfs_map_root': 'NFS_MAP_ROOT',
'nfs_map_all': 'NFS_MAP_ALL',
}
if name.lower() not in convert:
raise ValueError('%s is not one of none, root, or all' % (name))
return convert[name.lower()]
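# e.g. convert_nfs_user_mapping('Root') returns 'NFS_MAP_ROOT';
# convert_nfs_user_mapping('bogus') raises ValueError.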
def process_user_mapping(user_mapping, map_to_user_id):
user_mapping = convert_nfs_user_mapping(user_mapping)
    if user_mapping != 'NFS_MAP_NONE' and map_to_user_id == '0':
        raise ValueError('user_mapping ' + user_mapping +
            ' requires map_to_user_id')
    if user_mapping == 'NFS_MAP_NONE' and map_to_user_id != '0':
raise ValueError('map_to_user_id is only valid when mapping an user ' +
'(user_mapping is not NONE). If user_mapping is NONE, remove ' +
'map_to_user_id or make it "0".')
return user_mapping
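# e.g. process_user_mapping('root', '500') returns 'NFS_MAP_ROOT', while
# process_user_mapping('root', '0') raises ValueError (mapping needs a user id).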
def parse_nfs_restrictions_file(conninfo, credentials, path):
# Parse JSON file.
with open(path) as f:
contents = f.read()
try:
restrictions = json.loads(contents)
    except ValueError as e:
raise ValueError('Error parsing JSON restrictions file ' + str(e))
# Validate the restrictions are well formed, and create the
# NFSRestriction object.
nfs_restrictions = list()
for r in restrictions['restrictions']:
# Get read-only.
read_only = r.get('read_only', False)
# Process host restrictions.
host_restrictions = r.get('host_restrictions', [])
# Process user mapping values.
try:
user_mapping = process_user_mapping(r.get('user_mapping', 'none'),
r.get('map_to_user_id', '0'))
except ValueError as e:
raise ValueError('When trying to process the following ' +
'restriction: ' + str(r) + ', this error was thrown: ' +
e.message)
# Allow either auth_id or user name.
user_id = users.get_user_id(conninfo, credentials,
r.get('map_to_user_id', '0'))
# Add the NFSRestriction to the list.
nfs_restrictions.append(
NFSRestriction({'read_only': read_only,
'host_restrictions': host_restrictions,
'user_mapping': user_mapping,
'map_to_user_id': str(user_id.data)}))
# Return the list of restrictions.
return nfs_restrictions
class NFSListSharesCommand(qumulo.lib.opts.Subcommand):
NAME = "nfs_list_shares"
DESCRIPTION = "List all NFS shares"
@staticmethod
def main(conninfo, credentials, _args):
print nfs.nfs_list_shares(conninfo, credentials)
class NFSAddShareCommand(qumulo.lib.opts.Subcommand):
NAME = "nfs_add_share"
DESCRIPTION = "Add a new NFS share"
@staticmethod
def options(parser):
parser.add_argument("--export-path", type=str, default=None,
required=True, help="NFS Export path")
parser.add_argument("--fs-path", type=str, default=None, required=True,
help="File system path")
parser.add_argument("--description", type=str, default='',
help="Description of this export")
# Require either 'no-restrictions' or the restrictions file.
restriction_arg = parser.add_mutually_exclusive_group(required=True)
restriction_arg.add_argument("--no-restrictions", action="store_true",
default=False, help='Specify no restrictions for this share.')
restriction_arg.add_argument("--restrictions", type=str, default=None,
metavar='JSON_FILE_PATH', required=False,
help='Path to local file containing the ' +
'restrictions in JSON format. ' +
'user_mapping can be "none"|"root"|"all". ' +
'map_to_user_id may be "guest"|"admin"|"<integer_id>". ' +
'Example JSON: ' +
'{ "restrictions" : [ { ' +
'"read_only" : true, ' +
'"host_restrictions" : [ "1.2.3.1", "1.2.3.2" ], ' +
'"user_mapping" : "root", ' +
'"map_to_user_id" : "guest" }, ' +
'{<another_restriction>} ] }')
parser.add_argument("--create-fs-path", action="store_true",
help="Creates the specified file system path if it does not exist")
@staticmethod
def main(conninfo, credentials, args):
if args.restrictions:
restrictions = parse_nfs_restrictions_file(conninfo, credentials,
args.restrictions)
else:
restrictions = [NFSRestriction.create_default()]
print nfs.nfs_add_share(conninfo, credentials,
args.export_path, args.fs_path, args.description, restrictions,
args.create_fs_path)
class NFSListShareCommand(qumulo.lib.opts.Subcommand):
NAME = "nfs_list_share"
DESCRIPTION = "List a share"
@staticmethod
def options(parser):
parser.add_argument("--id", type=str, default=None, required=True,
help="ID of share to list")
@staticmethod
def main(conninfo, credentials, args):
print nfs.nfs_list_share(conninfo, credentials, args.id)
class NFSModShareCommand(qumulo.lib.opts.Subcommand):
NAME = "nfs_mod_share"
DESCRIPTION = "Modify a share"
@staticmethod
def options(parser):
parser.add_argument("--id", type=str, default=None, required=True,
help="ID of share to modify")
parser.add_argument("--export-path", type=str, default=None,
help="Change NFS export path")
parser.add_argument("--fs-path", type=str, default=None,
help="Change file system path")
parser.add_argument("--description", type=str, default=None,
help="Description of this export")
# Do not require a restrictions argument, it will preserve the existing
# ones.
restriction_arg = parser.add_mutually_exclusive_group(required=False)
restriction_arg.add_argument("--no-restrictions", action="store_true",
default=False, help='Specify no restrictions for this share.')
restriction_arg.add_argument("--restrictions", type=str, default=None,
metavar='JSON_FILE_PATH', required=False,
help='Path to local file containing the ' +
'restrictions in JSON format. ' +
'user_mapping can be "none"|"root"|"all". ' +
'map_to_user_id may be "guest"|"admin"|"<integer_id>". ' +
'Example JSON: ' +
'{ "restrictions" : [ { ' +
'"read_only" : true, ' +
'"host_restrictions" : [ "1.2.3.1", "1.2.3.2" ], ' +
'"user_mapping" : "root", ' +
'"map_to_user_id" : "guest" }, ' +
'{<another_restriction>} ] }')
parser.add_argument("--create-fs-path", action="store_true",
help="Creates the specified file system path if it does not exist")
@staticmethod
def main(conninfo, credentials, args):
# Get existing share
share_info = {}
share_info, share_info['if_match'] = \
nfs.nfs_list_share(conninfo, credentials, args.id)
# Modify share
share_info['id_'] = share_info['id']
share_info['allow_fs_path_create'] = args.create_fs_path
del share_info['id']
if args.export_path is not None:
share_info['export_path'] = args.export_path
if args.fs_path is not None:
share_info['fs_path'] = args.fs_path
if args.description is not None:
share_info['description'] = args.description
# Overwrite the NFS restrictions from JSON file.
if args.restrictions:
share_info['restrictions'] = parse_nfs_restrictions_file(
conninfo, credentials, args.restrictions)
elif args.no_restrictions:
# Modify the share's restrictions to be the default ones (no
# restrictions).
share_info['restrictions'] = [NFSRestriction.create_default()]
else:
# If no restrictions were specified and the user didn't set the
# --no-restrictions flag, let's preserve the ones that
# were originally set for this share. However, we need to re-pack
# them to be of type "NFSRestriction", in order to keep the REST
# client consistent.
share_info['restrictions'] = \
[NFSRestriction(r) for r in share_info['restrictions']]
print nfs.nfs_modify_share(conninfo, credentials,
**share_info)
class NFSDeleteShareCommand(qumulo.lib.opts.Subcommand):
NAME = "nfs_delete_share"
DESCRIPTION = "Delete a share"
@staticmethod
def options(parser):
parser.add_argument("--id", type=str, default=None, required=True,
help="ID of share to delete")
@staticmethod
def main(conninfo, credentials, args):
nfs.nfs_delete_share(conninfo, credentials, args.id)
print "Share has been deleted."
| {
"content_hash": "66ff1b77ef9b56c5bbe113639756f4fe",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 79,
"avg_line_length": 39.99145299145299,
"alnum_prop": 0.5960675357982474,
"repo_name": "Qumulo/qumulo_splunk_app",
"id": "6d27080036661982f8dc7746e3d44bb9b719746e",
"size": "9931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/qumulo/commands/nfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "641909"
}
],
"symlink_target": ""
} |
"""
You are given a string of lowercase letters. Your task is to figure out the index of the character on whose removal will
make the string a palindrome. There will always be a valid solution.
In case string is already palindrome, then -1 is also a valid answer along with possible indices.
Input Format
The first line contains T i.e. number of test cases.
T lines follow, each line containing a string.
"""
__author__ = 'Danyang'
class Solution_TLE(object):
def solve(self, cipher):
"""
Algorithm:
brutal force. O(N^2)
Non-brainer
:param cipher: the cipher
"""
for i in xrange(len(cipher)):
if self.__is_palindrome(cipher[:i] + cipher[i + 1:]):
return i
return -1
def __is_palindrome(self, s):
return s == s[::-1]
class Solution(object):
def solve(self, cipher):
"""
Algorithm:
Guarantee one and only one answer
O(N)
:param cipher: the cipher
"""
l = len(cipher)
start = 0
end = l - 1
while start < end and cipher[start] == cipher[end]:
start += 1
end -= 1
if self.__is_palindrome(cipher[:start] + cipher[start + 1:]):
return start
if self.__is_palindrome(cipher[:end] + cipher[end + 1:]):
return end
if start >= end:
return -1
def __is_palindrome(self, s):
return s == s[::-1]
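# Worked example: Solution().solve("aaab") -> 3, since removing index 3
# leaves "aaa", a palindrome; an already-palindromic input returns -1.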
if __name__ == "__main__":
import sys
f = open("1.in", "r")
# f = sys.stdin
testcases = int(f.readline().strip())
for t in xrange(testcases):
# construct cipher
cipher = f.readline().strip()
# solve
s = "%s\n" % (Solution().solve(cipher))
print s,
| {
"content_hash": "5037d5db075cecb6f36521f232ffe904",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 120,
"avg_line_length": 24.88888888888889,
"alnum_prop": 0.546875,
"repo_name": "algorhythms/HackerRankAlgorithms",
"id": "ad7bb2c9cdfdf553a20e280b5c4d1050472999b7",
"size": "1792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Palindrome Index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "5450"
},
{
"name": "Java",
"bytes": "2566"
},
{
"name": "Python",
"bytes": "208997"
}
],
"symlink_target": ""
} |
from oslo_log import log
from manila.common import constants
from manila.share import driver
from manila.tests import fake_service_instance
LOG = log.getLogger(__name__)
class FakeShareDriver(driver.ShareDriver):
"""Fake share driver.
This fake driver can be also used as a test driver within a real
running manila-share instance. To activate it use this in manila.conf::
enabled_share_backends = fake
[fake]
driver_handles_share_servers = True
share_backend_name = fake
share_driver = manila.tests.fake_driver.FakeShareDriver
With it you basically mocked all backend driver calls but e.g. networking
will still be activated.
"""
def __init__(self, *args, **kwargs):
self._setup_service_instance_manager()
super(FakeShareDriver, self).__init__([True, False], *args, **kwargs)
def _setup_service_instance_manager(self):
self.service_instance_manager = (
fake_service_instance.FakeServiceInstanceManager())
def manage_existing(self, share, driver_options, share_server=None):
LOG.debug("Fake share driver: manage")
LOG.debug("Fake share driver: driver options: %s",
str(driver_options))
return {'size': 1}
def unmanage(self, share, share_server=None):
LOG.debug("Fake share driver: unmanage")
@property
def driver_handles_share_servers(self):
if not isinstance(self.configuration.safe_get(
'driver_handles_share_servers'), bool):
return True
return self.configuration.driver_handles_share_servers
def create_snapshot(self, context, snapshot, share_server=None):
pass
def delete_snapshot(self, context, snapshot, share_server=None):
pass
def create_share(self, context, share, share_server=None):
return ['/fake/path', '/fake/path2']
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None, parent_share=None):
return {
'export_locations': ['/fake/path', '/fake/path2'],
'status': constants.STATUS_AVAILABLE,
}
def delete_share(self, context, share, share_server=None):
pass
def ensure_share(self, context, share, share_server=None):
pass
def allow_access(self, context, share, access, share_server=None):
pass
def deny_access(self, context, share, access, share_server=None):
pass
def get_share_stats(self, refresh=False):
return None
def do_setup(self, context):
pass
def setup_server(self, *args, **kwargs):
pass
def teardown_server(self, *args, **kwargs):
pass
def get_network_allocations_number(self):
# NOTE(vponomaryov): Simulate drivers that use share servers and
# do not use 'service_instance' module.
return 2
def _verify_share_server_handling(self, driver_handles_share_servers):
return super(FakeShareDriver, self)._verify_share_server_handling(
driver_handles_share_servers)
def create_share_group(self, context, group_id, share_server=None):
pass
def delete_share_group(self, context, group_id, share_server=None):
pass
def get_share_status(self, share, share_server=None):
return {
'export_locations': ['/fake/path', '/fake/path2'],
'status': constants.STATUS_AVAILABLE,
}
| {
"content_hash": "83dc7559042ddd66ddfff063e3e03d34",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 77,
"avg_line_length": 31.45045045045045,
"alnum_prop": 0.6390718991692924,
"repo_name": "openstack/manila",
"id": "b75508e7831e43c6c95467c8d15ef969dbf541bc",
"size": "4141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/tests/fake_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "12728998"
},
{
"name": "Shell",
"bytes": "107601"
}
],
"symlink_target": ""
} |
from pytest import fixture
from solar.system_log import change
from solar.system_log import data
from solar.system_log import operations
from solar.core.resource import resource
from solar.interfaces import orm
def test_revert_update():
commit = {'a': '10'}
previous = {'a': '9'}
res = orm.DBResource(id='test1', name='test1', base_path='x')
res.save()
res.add_input('a', 'str', '9')
action = 'update'
resource_obj = resource.load(res.name)
assert resource_obj.args == previous
log = data.SL()
    logitem = change.create_logitem(
res.name, action, change.create_diff(commit, previous))
log.append(logitem)
resource_obj.update(commit)
operations.move_to_commited(logitem.log_action)
assert resource_obj.args == commit
change.revert(logitem.uid)
assert resource_obj.args == previous
| {
"content_hash": "92f34d306626d2a4e5d6b9ce9fbe85c3",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 65,
"avg_line_length": 27.612903225806452,
"alnum_prop": 0.6845794392523364,
"repo_name": "torgartor21/solar",
"id": "be36e5a74bdce01593e4d76a07573c2b19f105aa",
"size": "1467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solar/solar/test/test_system_log_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Puppet",
"bytes": "82954"
},
{
"name": "Python",
"bytes": "289854"
},
{
"name": "Shell",
"bytes": "1785"
}
],
"symlink_target": ""
} |
from base import TestRailAPIBase
class Suite(TestRailAPIBase):
"""
Use the following API methods to request details
about test suites and to create or modify test suites.
"""
def __repr__(self):
return '<TestRailAPI suite>'
def get(self, suite_id):
"""
Returns an existing test suite.
:param suite_id:The ID of the test suite
"""
return self._get('get_suite/{}'.format(suite_id))
def for_project(self, project_id):
"""
Returns a list of test suites for a project.
:param project_id:The ID of the project
"""
return self._get('get_suites/{}'.format(project_id))
def add(self, project_id, name, description=None):
"""
Creates a new test suite.
:param project_id:The ID of the project the test suite should be added to
:param name:The name of the test suite (required)
:param description:The description of the test suite
"""
param = dict(name=name, description=description)
return self._post('add_suite/{}'.format(project_id),
json=param)
    def update(self, suite_id, name, description=None):
        """
        Updates an existing test suite.
        :param suite_id:The ID of the test suite
        :param name:The name of the test suite (required)
        :param description:The description of the test suite
        """
        param = dict(name=name, description=description)
        return self._post('update_suite/{}'.format(suite_id),
                          json=param)
def delete(self, suite_id):
"""
Deletes an existing test suite.
:param suite_id:The ID of the test suite
"""
return self._post('delete_suite/{}'.format(suite_id))
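# Example usage (hypothetical wiring; a Suite instance is normally obtained
# from a TestRail client object rather than constructed directly):
#   client.suite.add(project_id=1, name='Smoke tests')
#   client.suite.update(suite_id=5, name='Regression suite')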
| {
"content_hash": "937123f7cd4cb4536dc23ef2b971759e",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 81,
"avg_line_length": 33.905660377358494,
"alnum_prop": 0.5909849749582637,
"repo_name": "JASON0916/testrail-library",
"id": "04796643f9afcdd5ff4fa48f89887ffa2ee769ba",
"size": "1988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testrail_client/api/suite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42161"
}
],
"symlink_target": ""
} |
import pytest
import loginsightwebhookdemo.msteams
import conftest
NUMRESULTS = '1'
# A public URL for testing Teams is not available
URL = 'https://mock.test'
@pytest.mark.parametrize("url,post,data,expected,method", [
# No URL
(None,
'/endpoint/msteams',
conftest.payload,
'500 INTERNAL SERVER ERROR', 'POST'),
(None,
'/endpoint/msteams/',
conftest.payload,
'404 NOT FOUND', 'POST'),
# All params
(URL,
'/endpoint/msteams',
conftest.payload,
'500 INTERNAL SERVER ERROR', 'POST'),
(URL,
'/endpoint/msteams',
conftest.payloadvROps60,
'500 INTERNAL SERVER ERROR', 'POST'),
(URL,
'/endpoint/msteams',
conftest.payloadvROps62,
'500 INTERNAL SERVER ERROR', 'POST'),
(URL,
'/endpoint/msteams',
conftest.payloadLI_test,
'500 INTERNAL SERVER ERROR', 'POST'),
(URL,
'/endpoint/msteams/' + NUMRESULTS,
conftest.payload,
'500 INTERNAL SERVER ERROR', 'POST'),
])
def test_msteams(url, post, data, expected, method):
if url is not None:
loginsightwebhookdemo.msteams.TEAMSURL = url
if method == 'PUT':
rsp = conftest.client.put(post, data=data, content_type="application/json")
else:
rsp = conftest.client.post(post, data=data, content_type="application/json")
assert rsp.status == expected
| {
"content_hash": "cfaf949178ed8f4e65245e1d34cd9348",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 84,
"avg_line_length": 27.442307692307693,
"alnum_prop": 0.606166783461808,
"repo_name": "vmw-loginsight/webhook-shims",
"id": "dfaf57b1f3c6316c8a6d421e94712dd86cd1d263",
"size": "1451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/msteams_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "128165"
}
],
"symlink_target": ""
} |
"""
WSGI config for canvas project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "canvas.settings")
application = get_wsgi_application()
| {
"content_hash": "e6ceedd6fac25e7021a6ea558e552676",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.375,
"alnum_prop": 0.7692307692307693,
"repo_name": "eshamay/score",
"id": "642343c7b8e871458c3aa6d8eb2c76123a24ea67",
"size": "390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "canvas/canvas/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "200"
},
{
"name": "JavaScript",
"bytes": "386"
},
{
"name": "Python",
"bytes": "11418"
},
{
"name": "Shell",
"bytes": "3206"
}
],
"symlink_target": ""
} |
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See: http://effbot.org/tkinterbook/menu.htm
# "Popup menu are explicitly displayed, using the post method."
# TODO: the popup menu should disapear when the user clic somewhere around...
import tkinter as tk
root = tk.Tk()
# The callback
def hello():
print("Hello!")
# Create a popup menu
menu = tk.Menu(root, tearoff=0)
menu.add_command(label="Hello", command=hello)
menu.add_command(label="Quit", command=root.quit)
# Create a canvas
frame = tk.Frame(root, width=512, height=512)
frame.pack()
label = tk.Label(root, text="Use the mouse right button to show the popup menu")
label.pack(fill=tk.X, expand=1)
def popup(event):
menu.post(event.x_root, event.y_root) # Explicitely display the menu
# Attach popup to canvas
frame.bind("<Button-3>", popup)
root.mainloop()
| {
"content_hash": "7681aa73a44bef51d639e1498d490292",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 80,
"avg_line_length": 30.166666666666668,
"alnum_prop": 0.739542225730071,
"repo_name": "jeremiedecock/snippets",
"id": "5f1fe22180d69aedbb822668c96ea7c140c19deb",
"size": "1953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/tkinter/python3/menu_popup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "4294"
},
{
"name": "Batchfile",
"bytes": "6779"
},
{
"name": "C",
"bytes": "102107"
},
{
"name": "C++",
"bytes": "320943"
},
{
"name": "CMake",
"bytes": "11424"
},
{
"name": "CSS",
"bytes": "21121"
},
{
"name": "Cython",
"bytes": "21"
},
{
"name": "Dockerfile",
"bytes": "1818"
},
{
"name": "Fortran",
"bytes": "633"
},
{
"name": "Gnuplot",
"bytes": "39999"
},
{
"name": "Go",
"bytes": "3166"
},
{
"name": "Groovy",
"bytes": "3009"
},
{
"name": "HTML",
"bytes": "138995"
},
{
"name": "IDL",
"bytes": "43"
},
{
"name": "Java",
"bytes": "120221"
},
{
"name": "JavaScript",
"bytes": "32342"
},
{
"name": "Jinja",
"bytes": "206"
},
{
"name": "Jupyter Notebook",
"bytes": "95991"
},
{
"name": "Lua",
"bytes": "200"
},
{
"name": "M4",
"bytes": "111"
},
{
"name": "MATLAB",
"bytes": "31972"
},
{
"name": "Makefile",
"bytes": "81307"
},
{
"name": "OpenSCAD",
"bytes": "14995"
},
{
"name": "PHP",
"bytes": "94"
},
{
"name": "Perl",
"bytes": "46"
},
{
"name": "Processing",
"bytes": "208"
},
{
"name": "Prolog",
"bytes": "454"
},
{
"name": "Python",
"bytes": "1685966"
},
{
"name": "R",
"bytes": "76"
},
{
"name": "Raku",
"bytes": "43"
},
{
"name": "Ruby",
"bytes": "42"
},
{
"name": "Scheme",
"bytes": "649"
},
{
"name": "Shell",
"bytes": "52865"
},
{
"name": "Smalltalk",
"bytes": "55"
},
{
"name": "TeX",
"bytes": "1189"
},
{
"name": "Vue",
"bytes": "49445"
},
{
"name": "XSLT",
"bytes": "1816"
}
],
"symlink_target": ""
} |
import nghttp2
import logging
import asyncio
import sys
import os, time, io
from base64 import b64decode
from copy import copy, deepcopy
from .config import get_config
from datetime import timedelta, datetime
from uuid import uuid4 as uuid
from wheezy.template.engine import Engine
from wheezy.template.ext.core import CoreExtension
from wheezy.template.loader import FileLoader
from routes import Mapper
from importlib import import_module
import mimetypes
mimetypes.init()
LOG = logging.getLogger(__name__)
# Consider: https://docs.python.org/3/library/contextlib.html#contextlib.closing
template_engine = Engine(
loader=FileLoader(get_config().get('templates', {}).get('search_path', 'content/templates-wheezy;content').split(';')),
extensions=[CoreExtension()]
)
def last_modified(path):
return (b'last-modified', time.strftime("%a, %d %b %Y %H:%M:%S %Z", time.gmtime(os.path.getmtime(path))))
def index(request, start_response):
template = template_engine.get_template('index.html')
if template:
_status = None
_headers = None
def local_start(status, headers):
nonlocal _status
nonlocal _headers
_status = status
_headers.extend(headers)
for path in []: # ['/static/app.js', '/static/app.css']:
_status = 404
_headers = copy(request.response['headers'])
request.match['filename'] = path
body = static_content(request, local_start)
request.push(path, status=_status, headers=_headers, body=body)
request._setup_session()
start_response(200, [(b'content-type', 'text/html'), (b'cache-control', b'public, must-revalidate, max-age=60')])
return template.render({'backends': { k: v for k,v in get_config().items() if 'module' in v }})
def favicon(request, start_response):
start_response(200, [(b'content-type', b'image-x-icon'), (b'cache-control', b'public, max-age=432000000'), last_modified(__file__)])
return b64decode('iVBORw0KGgoAAAANSUhEUgAAABAAAAAQEAYAAABPYyMiAAAABmJLR0T///////8JWPfcAAAACXBIWXMAAABIAAAASABGyWs+AAAAF0lEQVRIx2NgGAWjYBSMglEwCkbBSAcACBAAAeaR9cIAAAAASUVORK5CYII=')
def not_found(request, start_response):
start_response(404, [])
return None
def static_content(request, start_response):
local_path = "content%s" % request.match['filename']
mimetype, _ = mimetypes.guess_type(local_path)
start_response(200, [(b'content-type', mimetype), (b'cache-control', b'public, max-age=60'), last_modified(local_path)])
f = io.open(local_path, 'rb', io.DEFAULT_BUFFER_SIZE)
return f
class Request(nghttp2.BaseRequestHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# defined in BaseRequestHandler:
# scheme
# host
@property
def server_name(self):
return self.host()
@property
def script_name(self):
return ''
@property
def path_info(self):
return self.path
def generate_routes():
m = Mapper()
m.connect('/', handler=index)
m.connect('{filename:/static/.*?}', handler=static_content)
m.connect('/favicon.ico', handler=favicon)
for (k, config) in get_config().items():
config = deepcopy(config)
backend_module = config.pop('module', None)
        if backend_module is not None:
LOG.warn("Configuring %s", backend_module)
module = import_module(backend_module)
try:
backend = module.create(config)
for method in [b'GET', b'PUT', b'POST', b'DELETE']:
try:
attr = getattr(backend, method.decode().lower())
conditions = dict(method=[method])
m.connect("/q/%s" % k, conditions=conditions, handler=attr)
m.connect("/q/%s/?{subscription:.*?}" % k, conditions=conditions, handler=attr)
except AttributeError:
pass
except AttributeError as e:
LOG.error("Could not create controller for %s: %s", k, backend_module)
LOG.error(e)
return m
routes = generate_routes()
class Session(Request):
SESSION_ID = 'SSID='
backend = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.session_id = None
self.response = {}
self.match = None
def get_backend(self, name):
backend = Session.backend.get(name, None)
if not backend:
config = get_config().get(name, None)
if not config:
LOG.error("Cannot find config for '%s'", name)
return None
config = deepcopy(config)
backend_module = config.pop('module')
try:
backend = getattr(sys.modules.get(backend_module, None), 'create')(config)
Session.backend[name] = backend
except AttributeError as e:
LOG.error("Cannot load module %s for %s", backend_module, name)
LOG.error(e)
return backend
def _get_session_cookie(self):
cookies = [header[1] for header in self.headers if header[0].decode('ascii').lower() == 'cookie']
for cookie in cookies:
cookie = cookie.decode('ascii')
for item in cookie.split('; '):
if item.startswith(Session.SESSION_ID):
                    # Slice the matched item, not the whole cookie header; the +1
                    # skips the doubled '=' written by _setup_session ("SSID==<id>").
                    return item[len(Session.SESSION_ID)+1:].strip()
return None
def _setup_session(self):
self.session_id = self._get_session_cookie() or str(uuid())
self.response['headers'] = [('set-cookie', "%s=%s; Path=/; Expires=%s; Domain=%s" % (Session.SESSION_ID, self.session_id, (datetime.utcnow() + timedelta(hours=1)).strftime("%a, %d-%b-%Y %X UTC"), ''))]
def start_response(self, status, headers):
self.response['status'] = status
self.response['headers'].extend(headers)
def on_headers(self):
self._setup_session()
LOG.debug("{} {}".format(self.method, self.path))
self.match = routes.match(environ={'PATH_INFO': self.path.decode('utf-8'),
'REQUEST_METHOD': self.method})
if self.match:
body = self.match['handler'](self, self.start_response)
self.response['body'] = body
return self.send_response(**self.response)
def on_data(self, data):
try:
self.response['body'].on_data(data)
except (KeyError, AttributeError) as e:
LOG.debug("Could not pass on on_data: %s", e)
def on_request_done(self):
try:
self.response['body'].on_request_done()
except (KeyError, AttributeError) as e:
LOG.debug("Could not pass on on_request_done: %s", e)
def on_close(self, error_code):
try:
body = self.response.get('body', None)
if body:
if asyncio.iscoroutinefunction(body.close):
asyncio.async(body.close())
else:
body.close()
except AttributeError:
pass
| {
"content_hash": "490583dba292f1f394fe3965dc945ac0",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 209,
"avg_line_length": 35.66831683168317,
"alnum_prop": 0.5944482997918112,
"repo_name": "fwiesel/http2broker",
"id": "3c57f63956dd5e4d5ada7cb1007736a547383f43",
"size": "7205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/http2broker/session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1351"
},
{
"name": "HTML",
"bytes": "1663"
},
{
"name": "JavaScript",
"bytes": "1810"
},
{
"name": "Python",
"bytes": "36204"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
#from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from managers import SynonymManager
class Synonym(models.Model):
#CHOICES = {"model__in": ("author", "tag")}
try:
CHOICES = {"model__in": settings.MSYN_CONTENT_TYPES_LIMIT }
    except AttributeError:
        # A missing Django setting raises AttributeError, not NameError.
        CHOICES = {}
content_type = models.ForeignKey(ContentType, limit_choices_to = CHOICES, related_name="synonyms")
object_id = models.PositiveIntegerField(db_index=True)
synonym_id = models.PositiveIntegerField(db_index=True)
content_object = generic.GenericForeignKey('content_type', 'object_id')
synonym_object = generic.GenericForeignKey('content_type', 'synonym_id')
synonyms = SynonymManager()
_reverse = False
class Meta:
verbose_name = _(u'synonym')
verbose_name_plural = _(u'synonyms')
def __unicode__(self):
return u"%s is synonym of %s" % (self.content_object, self.synonym_object)
@property
def reverse(self):
        return bool(self._reverse)
@reverse.setter
def reverse(self, value):
self._reverse = value
@property
def synonym(self):
if not self.reverse:
return self.synonym_object
else:
return self.content_object
@property
def object(self):
if not self.reverse:
return self.content_object
else:
return self.synonym_object
| {
"content_hash": "dc44dc1f89ac426173d9ed83515a06ab",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 102,
"avg_line_length": 27.841269841269842,
"alnum_prop": 0.6482326111744584,
"repo_name": "anscii/django-model-synonyms",
"id": "f753f30a514b1de7e22132b19b17e70ac71d8e36",
"size": "1754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "msyn/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11709"
}
],
"symlink_target": ""
} |
import subprocess
from rstgen.utils import confirm
from django.core.management.base import BaseCommand
from django.conf import settings
def runcmd(cmd, **kw): # same code as in getlino.py
"""Run the cmd similar as os.system(), but stop when Ctrl-C."""
# kw.update(stdout=subprocess.PIPE)
# kw.update(stderr=subprocess.STDOUT)
kw.update(shell=True)
kw.update(universal_newlines=True)
kw.update(check=True)
# subprocess.check_output(cmd, **kw)
subprocess.run(cmd, **kw)
# os.system(cmd)
class Command(BaseCommand):
help = "Run 'pip install --upgrade' for all Python packages required by this site."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('--noinput', action='store_false',
dest='interactive', default=True,
help='Do not prompt for input of any kind.')
parser.add_argument('-l', '--list', action='store_true',
dest='list', default=False,
help="Just list the requirements, don't install them.")
def handle(self, *args, **options):
reqs = set(settings.SITE.get_requirements())
if len(reqs) == 0:
print("No requirements")
else:
reqs = sorted(reqs)
if options['list']:
print('\n'.join(reqs))
return
runcmd('pip install --upgrade pip')
# cmd = "pip install --upgrade --trusted-host svn.forge.pallavi.be {}".format(' '.join(reqs))
cmd = "pip install --upgrade {}".format(' '.join(reqs))
if not options['interactive'] or confirm("{} (y/n) ?".format(cmd)):
runcmd(cmd)
| {
"content_hash": "16a489f6c06b7ec30cb75e406d1b07d4",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 105,
"avg_line_length": 38.82222222222222,
"alnum_prop": 0.5804235832856325,
"repo_name": "lino-framework/lino",
"id": "1fdf8bff34060df4cc4393194e114b9056159b55",
"size": "1888",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lino/management/commands/install.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "704"
},
{
"name": "CSS",
"bytes": "1281825"
},
{
"name": "Emacs Lisp",
"bytes": "277895"
},
{
"name": "HTML",
"bytes": "928037"
},
{
"name": "Hack",
"bytes": "3416"
},
{
"name": "JavaScript",
"bytes": "1128493"
},
{
"name": "PHP",
"bytes": "53997"
},
{
"name": "Python",
"bytes": "2601694"
},
{
"name": "Shell",
"bytes": "4469"
},
{
"name": "TSQL",
"bytes": "2427"
}
],
"symlink_target": ""
} |
from flask import render_template, session, redirect, url_for
def checkSession():
if 'username' not in session:
return redirect(url_for('login'))
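# Usage note (assumed intent): the caller must return this helper's result
# for the redirect to take effect, e.g.:
#   resp = checkSession()
#   if resp:
#       return resp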
| {
"content_hash": "fc281c3d61a2b27e7d090a5ebed60e38",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 61,
"avg_line_length": 32.6,
"alnum_prop": 0.6871165644171779,
"repo_name": "darneymartin/ChartIT",
"id": "fce8a7ccfc6b9517f0f3979678367a9c397d4577",
"size": "163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Controller/View/Controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "597"
},
{
"name": "HTML",
"bytes": "50758"
},
{
"name": "JavaScript",
"bytes": "11994"
},
{
"name": "Python",
"bytes": "39519"
},
{
"name": "Shell",
"bytes": "690"
}
],
"symlink_target": ""
} |
"""
@package ion.agents.data.external_dataset_agent
@file ion/agents/data/external_dataset_agent.py
@author Tim Giguere
@author Christopher Mueller
@brief Class derived from InstrumentAgent that provides a one-to-one relationship between an ExternalDatasetAgent instance
and a given external dataset
"""
from pyon.public import log
from pyon.util.containers import get_safe
from pyon.core.exception import InstDriverError
from pyon.core.exception import NotFound
from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
from ion.agents.instrument.exceptions import InstrumentStateException
from pyon.agent.agent import ResourceAgentEvent
from pyon.agent.agent import ResourceAgentState
from ion.agents.instrument.instrument_agent import InstrumentAgent
from ion.core.includes.mi import DriverEvent
class ExternalDatasetAgent(InstrumentAgent):
def __init__(self, initial_state=ResourceAgentState.UNINITIALIZED):
log.debug('ExternalDatasetAgent.__init__: initial_state = {0}'.format(initial_state))
InstrumentAgent.__init__(self, initial_state)
self._fsm.add_handler(ResourceAgentState.STREAMING, ResourceAgentEvent.EXECUTE_RESOURCE, self._handler_streaming_execute_resource)
# TODO: Do we need to (can we even?) remove handlers that aren't supported (i.e. Direct Access?)
###############################################################################
# Private helpers.
###############################################################################
def on_init(self):
InstrumentAgent.on_init(self)
def _start_driver(self, dvr_config):
"""
Instantiate the DataHandler based on the configuration
Called from:
InstrumentAgent._handler_uninitialized_initialize
@param dvr_config The driver configuration, equivalent to self._dvr_config
@retval None or error.
"""
# Get driver configuration and pid for test case.
dvr_mod = get_safe(self._dvr_config, 'dvr_mod', None)
dvr_cls = get_safe(self._dvr_config, 'dvr_cls', None)
dh_cfg = get_safe(self._dvr_config, 'dh_cfg', {})
log.debug('_start_driver: dvr_mod.dvr_cls={0}.{1} dh_cfg={2}'.format(dvr_mod, dvr_cls, dh_cfg))
if not dvr_mod or not dvr_cls:
raise InstDriverError('DataHandler module ({0}) and class ({1}) cannot be None'.format(dvr_mod, dvr_cls))
#CBM: Could also follow the "stream_config" pattern and simply copy the stream_id(s) from agent.stream_config into dh_cfg
if not 'stream_id' in dh_cfg:
raise InstDriverError('DataHandler config must contain dh_cfg.stream_id: {0}'.format(self._dvr_config))
# # TODO: Retrieve all resources needed by the DataHandler, they will be provided during configuration
# ## Here to !!!! END from external_observatory_agent
resreg_cli = ResourceRegistryServiceClient()
# CBM: WTF - If not wrapped - exception DISAPPEARS!! Why?
ext_ds_res = None
try:
ext_ds_res = resreg_cli.read(object_id=self.resource_id)
log.info('Dataset Resource (id={0}): {1}'.format(self.resource_id, ext_ds_res))
except NotFound as ex:
if not ext_ds_res and not get_safe(self._dvr_config, 'dh_cfg.TESTING'):
raise InstDriverError('No resource associated with id = {0}'.format(self.resource_id))
dh_cfg['external_dataset_res'] = ext_ds_res
dh_cfg['external_dataset_res_id'] = self.resource_id
# ext_resources = {'dataset':ext_ds_res}
# log.debug('Retrieved ExternalDataset: {0}'.format(ext_ds_res))
#
# dsrc_res, dsrc_assn = resreg_cli.find_objects(subject=ext_dataset_id, predicate=PRED.hasSource, object_type=RT.DataSource)
# dsrc_res = dsrc_res[0]
# dsrc_id = dsrc_assn[0].o
# ext_resources['datasource'] = dsrc_res
# log.debug('Found associated DataSource: {0}'.format(dsrc_id))
#
# edp_res, edp_assn = resreg_cli.find_objects(subject=dsrc_id, predicate=PRED.hasProvider, object_type=RT.ExternalDataProvider)
# edp_res = edp_res[0]
# edp_id = edp_assn[0].o
# ext_resources['provider'] = edp_res
# log.debug('Found associated ExternalDataProvider: {0}'.format(edp_id))
#
# dsrc_mdl_res, dsrc_mdl_assn = resreg_cli.find_objects(subject=dsrc_id, predicate=PRED.hasModel, object_type=RT.DataSourceModel)
# dsrc_mdl_res = dsrc_mdl_res[0]
# dsrc_mdl_id = dsrc_mdl_assn[0].o
# ext_resources['datasource_model'] = dsrc_mdl_res
# log.debug('Found associated DataSourceModel: {0}'.format(dsrc_mdl_id))
#
# dprod_res, dprod_assn = resreg_cli.find_objects(subject=ext_dataset_id, predicate=PRED.hasOutputProduct, object_type=RT.DataProduct)
# dprod_res = dprod_res[0]
# dprod_id = dprod_assn[0].o
# ext_resources['data_products'] = dprod_res
# log.debug('Found associated DataProduct: {0}'.format(dprod_id))
#
# stream_res, stream_assn = resreg_cli.find_objects(subject=dprod_id, predicate=PRED.hasStream, object_type=RT.Stream)
# stream_res = stream_res[0]
# stream_id = stream_assn[0].o
# ext_resources['stream_res'] = stream_res
# log.debug('Found associated Stream: {0}'.format(stream_id))
#
# comms_config = {'dataset_id':self.resource_id,'resources':ext_resources}
# ## !!!! END
# # TODO: Add the bits the DataHandler needs to know about to the 'comms_config' portion of the _dvr_config
# comms_config = {}
# The 'comms_config' portion of dvr_config is passed to configure()
# self._dvr_config['comms_config'] = comms_config
# Instantiate the DataHandler based on the configuration
try:
module = __import__(dvr_mod, fromlist=[dvr_cls])
classobj = getattr(module, dvr_cls)
log.debug('Load DataHandler: module={0} classojb={1}'.format(module, classobj))
self._dvr_client = classobj(dh_cfg)
self._dvr_client.set_event_callback(self.evt_recv)
# Initialize the DataHandler
self._dvr_client.cmd_dvr('initialize')
except Exception as ex:
self._dvr_client = None
raise InstDriverError('Error instantiating DataHandler \'{0}.{1}\': {2}'.format(dvr_mod, dvr_cls, ex))
#TODO: Temporarily construct packet factories to utilize pathways provided by IA
self._construct_packet_factories()
log.info('ExternalDatasetAgent \'{0}\' loaded DataHandler \'{1}.{2}\''.format(self._proc_name, dvr_mod, dvr_cls))
def _stop_driver(self):
"""
Unload the DataHandler instance
Called from:
InstrumentAgent._handler_inactive_reset,
InstrumentAgent._handler_idle_reset,
InstrumentAgent._handler_stopped_reset,
InstrumentAgent._handler_observatory_reset
@retval None.
"""
dvr_mod = get_safe(self._dvr_config, 'dvr_mod', None)
dvr_cls = get_safe(self._dvr_config, 'dvr_cls', None)
self._dvr_client = None
log.info('ExternalDatasetAgent \'{0}\' unloaded DataHandler \'{1}.{2}\''.format(self._proc_name, dvr_mod, dvr_cls))
return None
def _validate_driver_config(self):
"""
Test the driver config for validity.
Called BEFORE comms_config is added to self._dvr_config, so only validate core portions
@retval True if the current config is valid, False otherwise.
"""
try:
dvr_mod = self._dvr_config['dvr_mod']
dvr_cls = self._dvr_config['dvr_cls']
dvr_cfg = self._dvr_config['dh_cfg']
stream_id = dvr_cfg['stream_id']
        except (TypeError, KeyError):
return False
if not isinstance(dvr_mod, str) or not isinstance(dvr_cls, str) or not isinstance(dvr_cfg, dict) or not isinstance(stream_id, str):
return False
return True
def _handler_streaming_execute_resource(self, command, *args, **kwargs):
"""
Handler for execute_resource command in streaming state.
Delegates to InstrumentAgent._handler_observatory_execute_resource
"""
if command == DriverEvent.ACQUIRE_SAMPLE or command == DriverEvent.STOP_AUTOSAMPLE:
return self._handler_execute_resource(command, *args, **kwargs)
else:
raise InstrumentStateException('Command \'{0}\' not allowed in current state {1}'.format(command, self._fsm.get_current_state()))
def _construct_data_publishers(self):
pass
# """
# Construct the stream publishers from the stream_config agent
# config variable.
# @retval None
# """
# InstrumentAgent._construct_data_publishers(self)
def _construct_packet_factories(self):
pass
# """
# Construct packet factories from packet_config member of the
# driver_config.
# @retval None
# """
# pass
# def _clear_packet_factories(self):
# """
# Delete packet factories.
# @retval None
# """
# pass
# def _log_state_change_event(self, state):
# pass
# def _publish_instrument_agent_event(self, event_type=None, description=None):
# pass
def _handler_inactive_go_active(self, *args, **kwargs):
"""
"""
next_state = None
result = None
next_state = ResourceAgentState.IDLE
return (next_state, result)
| {
"content_hash": "d092bae988ec766d8eee935588e7edc3",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 149,
"avg_line_length": 44.08810572687225,
"alnum_prop": 0.60431654676259,
"repo_name": "ooici/coi-services",
"id": "aff76f7978bf42bea55b0e117a1b38f5d5d5d0e0",
"size": "10031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ion/agents/data/external_dataset_agent.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "403012"
},
{
"name": "C++",
"bytes": "251803"
},
{
"name": "CSS",
"bytes": "689"
},
{
"name": "Erlang",
"bytes": "532"
},
{
"name": "JavaScript",
"bytes": "11627"
},
{
"name": "Objective-C",
"bytes": "8918"
},
{
"name": "Python",
"bytes": "7964384"
},
{
"name": "Shell",
"bytes": "9221"
},
{
"name": "nesC",
"bytes": "57712131"
}
],
"symlink_target": ""
} |
from . import db, login_manager
from werkzeug.security import generate_password_hash, check_password_hash
from flask.ext.login import UserMixin, AnonymousUserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app, request, url_for
from datetime import datetime
import hashlib
from markdown import markdown
import bleach
from app.exceptions import ValidationError
class Permission:
FOLLOW = 0x01
COMMENT = 0x02
WRITE_ARTICLES = 0x04
MODERATE_COMMENTS = 0x08
ADMINISTER = 0x80
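    # The flags combine as a bitmask; e.g. the default 'User' role defined in
    # Role.insert_roles() below is FOLLOW | COMMENT | WRITE_ARTICLES == 0x07.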
class Follow(db.Model):
__tablename__ = 'follows'
follower_id = db.Column(db.Integer, db.ForeignKey('users.id'),
primary_key=True)
followed_id = db.Column(db.Integer, db.ForeignKey('users.id'),
primary_key=True)
timestamp = db.Column(db.DateTime, default=datetime.utcnow)
class Post(db.Model):
__tablename__ = 'posts'
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.Text)
body_html = db.Column(db.Text)
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
comments = db.relationship('Comment', backref='post', lazy='dynamic')
@staticmethod
def generate_fake(count=100):
from random import seed, randint
import forgery_py
seed()
user_count = User.query.count()
for i in range(count):
u = User.query.offset(randint(0, user_count - 1)).first()
p = Post(body=forgery_py.lorem_ipsum.sentences(randint(1, 5)),
timestamp=forgery_py.date.date(True),
author=u)
db.session.add(p)
db.session.commit()
@staticmethod
def on_changed_body(target, value, oldvalue, initiator):
allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote',
'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
'h1', 'h2', 'h3', 'p']
target.body_html = bleach.linkify(bleach.clean(
markdown(value, output_format='html'),
tags=allowed_tags, strip=True))
# api
def to_json(self):
json_post = {
'url': url_for('api.get_post', id=self.id, _external=True),
'body': self.body,
'body_html': self.body_html,
'timestamp': self.timestamp,
'author': url_for('api.get_user', id=self.author_id,
_external=True),
'comments': url_for('api.get_post_comments', id=self.id,
_external=True),
'comments_count': self.comments.count()
}
return json_post
@staticmethod
def from_json(json_post):
body = json_post.get('body')
if body is None or body == '':
raise ValidationError('post does not have a body')
return Post(body=body)
db.event.listen(Post.body, 'set', Post.on_changed_body)
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True, index=True)
users = db.relationship('User', backref='role', lazy='dynamic')
default = db.Column(db.Boolean, default=False)
permissions = db.Column(db.Integer)
@staticmethod
def insert_roles():
roles = {
'User': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES, True),
'Moderator': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES |
Permission.MODERATE_COMMENTS, False),
'Administrator': (0xff, False)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role %r>' % self.name
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
password_hash = db.Column(db.String(128))
email = db.Column(db.String(64), unique=True, index=True)
confirmed = db.Column(db.Boolean, default=False)
name = db.Column(db.String(64))
location = db.Column(db.String(64))
about_me = db.Column(db.Text())
member_since = db.Column(db.DateTime(), default=datetime.utcnow)
last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
avatar_hash = db.Column(db.String(32))
posts = db.relationship('Post', backref="author", lazy='dynamic')
comments = db.relationship('Comment', backref='author', lazy='dynamic')
followed = db.relationship('Follow',
foreign_keys=[Follow.follower_id],
backref=db.backref('follower', lazy='joined'),
lazy='dynamic',
cascade='all, delete-orphan')
followers = db.relationship('Follow',
foreign_keys=[Follow.followed_id],
backref=db.backref('followed', lazy='joined'),
lazy='dynamic',
cascade='all, delete-orphan')
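    # 'followed' holds the Follow rows where this user is the follower (who
    # they follow); 'followers' holds the rows where this user is followed.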
def follow(self, user):
if not self.is_following(user):
f = Follow(follower=self, followed=user)
db.session.add(f)
def unfollow(self, user):
f = self.followed.filter_by(followed_id=user.id).first()
if f:
db.session.delete(f)
def is_following(self, user):
return self.followed.filter_by(
followed_id=user.id).first() is not None
def is_followed_by(self, user):
return self.followers.filter_by(
follower_id=user.id).first() is not None
#by hand iterate over the whole user to make sure everyone follows himself/herself
@staticmethod
def add_self_follows():
for user in User.query.all():
if not user.is_following(user):
user.follow(user)
db.session.add(user)
db.session.commit()
def __init__(self, **kw):
super(User, self).__init__(**kw)
        if self.role is None:
if self.email == current_app.config['BLOG_ADMIN']:
self.role = Role.query.filter_by(name='Administrator').first()
else:
self.role = Role.query.filter_by(default=True).first()
if self.email is not None and self.avatar_hash is None:
            self.avatar_hash = hashlib.md5(self.email.encode('utf-8')).hexdigest()
# self followed at the first place
self.followed.append(Follow(followed=self))
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_administrator(self):
return self.can(Permission.ADMINISTER)
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
def gravatar(self, size=100, default='identicon', rating='g'):
if request.is_secure:
url = 'https://secure.gravatar.com/avatar'
else:
url = 'http://www.gravatar.com/avatar'
hash = self.avatar_hash or hashlib.md5(self.email.encode('utf-8')).hexdigest()
return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
url=url, hash=hash, size=size, default=default, rating=rating)
@property
def followed_posts(self):
return Post.query.join(Follow, Follow.followed_id == Post.author_id)\
.filter(Follow.follower_id == self.id)
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
def generate_reset_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def reset_password(self, token, new_password):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
return True
def generate_email_change_token(self, new_email, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def change_email(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
self.avatar_hash = hashlib.md5(self.email.encode('utf-8')).hexdigest()
db.session.add(self)
return True
@staticmethod
def generate_fake(count=100):
from sqlalchemy.exc import IntegrityError
from random import seed
import forgery_py
seed()
for i in range(count):
u = User(email=forgery_py.internet.email_address(),
username=forgery_py.internet.user_name(True),
password=forgery_py.lorem_ipsum.word(),
confirmed=True,
name=forgery_py.name.full_name(),
location=forgery_py.address.city(),
about_me=forgery_py.lorem_ipsum.sentence(),
member_since=forgery_py.date.date(True))
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
# api
def to_json(self):
json_user = {
'url': url_for('api.get_post', id=self.id, _external=True),
'username': self.username,
'member_since': self.member_since,
'last_seen': self.last_seen,
'posts': url_for('api.get_user_posts', id=self.id,
_external=True),
'followed_posts': url_for('api.get_user_followed_posts',
id=self.id, _external=True),
'post_count': self.posts.count()
}
return json_user
def generate_auth_token(self, expiration):
s = Serializer(current_app.config['SECRET_KEY'],
expires_in=expiration)
return s.dumps({'id': self.id}).decode('ascii')
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.query.get(data['id'])
def __repr__(self):
return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
def can(self, permission):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.Text)
body_html = db.Column(db.Text)
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
disabled = db.Column(db.Boolean)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
@staticmethod
def on_changed_body(target, value, oldvalue, initiator):
allowed_tags = ['a', 'abbr', 'acronym', 'b', 'code', 'em',
'i', 'strong']
target.body_html = bleach.linkify(bleach.clean(
markdown(value, output_format='html'),
tags=allowed_tags, strip=True))
# api
def to_json(self):
json_comment = {
'url': url_for('api.get_comment', id=self.id, _external=True),
'post': url_for('api.get_post', id=self.post_id, _external=True),
'body': self.body,
'body_html': self.body_html,
'timestamp': self.timestamp,
'author': url_for('api.get_user', id=self.author_id,
_external=True)
}
return json_comment
@staticmethod
def from_json(json_comment):
body = json_comment.get('body')
if body is None or body == '':
raise ValidationError('comment does not have a body')
return Comment(body=body)
db.event.listen(Comment.body, 'set', Comment.on_changed_body)
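# --- Added illustrative sketch (not part of the original file) ---
# The `on_changed_body` handlers render Markdown to HTML, strip any tag not
# in the whitelist, then linkify bare URLs. Roughly:
#
#   dirty = markdown('**hi** <script>alert(1)</script>', output_format='html')
#   clean = bleach.clean(dirty, tags=['b', 'strong'], strip=True)
#   html = bleach.linkify(clean)   # markup kept for whitelisted tags only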
| {
"content_hash": "79b8380f024ed4058810ad7bf3d8f6cc",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 86,
"avg_line_length": 35.98921832884097,
"alnum_prop": 0.597663271420012,
"repo_name": "caser789/xuejiao-blog",
"id": "9b821309909809639c5cab36a1aa92cba356973f",
"size": "13352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1882"
},
{
"name": "HTML",
"bytes": "18337"
},
{
"name": "Python",
"bytes": "71056"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('forum', '0005_auto_20160803_2129'),
]
operations = [
migrations.AlterModelOptions(
name='tag',
options={},
),
]
| {
"content_hash": "218ee1105c6020cdec488cdf6cbf180d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 45,
"avg_line_length": 18.352941176470587,
"alnum_prop": 0.5769230769230769,
"repo_name": "rdujardin/icforum",
"id": "a47c0298d646c764ed695f849eb6d43f306f450a",
"size": "968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "icforum/forum/migrations/0006_auto_20160803_2144.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8915"
},
{
"name": "HTML",
"bytes": "34809"
},
{
"name": "JavaScript",
"bytes": "3171"
},
{
"name": "Python",
"bytes": "100413"
}
],
"symlink_target": ""
} |
import logging
import re
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import gettext_lazy as _lazy
from timezone_field import TimeZoneField
from kitsune.lib.countries import COUNTRIES
from kitsune.products.models import Product
from kitsune.sumo.models import LocaleField, ModelBase
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import auto_delete_files
from kitsune.users.validators import TwitterValidator
log = logging.getLogger("k.users")
SHA1_RE = re.compile("^[a-f0-9]{40}$")
SET_ID_PREFIX = "https://schemas.accounts.firefox.com/event/"
class ContributionAreas(models.TextChoices):
KB = "kb-contributors", _lazy("KB Contributors")
L10N = "l10n-contributors", _lazy("L10n Contributors")
FORUM = "forum-contributors", _lazy("Forum Contributors")
SOCIAL = "social-contributors", _lazy("Social media Contributors")
MOBILE = "mobile-contributors", _lazy("Mobile support Contributors")
@classmethod
def has_value(cls, value):
return value in cls._value2member_map_
@classmethod
def get_values(cls):
return [item.value for item in cls]
@classmethod
def get_groups(cls):
"""Transitional class method that will return both old and new groups."""
return cls.get_values() + settings.LEGACY_CONTRIBUTOR_GROUPS
@classmethod
def has_member(cls, value):
return value in cls._member_names_
@auto_delete_files
class Profile(ModelBase):
"""Profile model for django users."""
user = models.OneToOneField(
User, on_delete=models.CASCADE, primary_key=True, verbose_name=_lazy("User")
)
name = models.CharField(
max_length=255, null=True, blank=True, verbose_name=_lazy("Display name")
)
public_email = models.BooleanField( # show/hide email
default=False, verbose_name=_lazy("Make my email address visible to logged in users")
)
avatar = models.ImageField(
upload_to=settings.USER_AVATAR_PATH,
null=True,
blank=True,
verbose_name=_lazy("Avatar"),
max_length=settings.MAX_FILEPATH_LENGTH,
)
bio = models.TextField(
null=True,
blank=True,
verbose_name=_lazy("Biography"),
help_text=_lazy(
"Some HTML supported: <abbr title> "
+ "<acronym title> <b> "
+ "<blockquote> <code> "
+ "<em> <i> <li> "
+ "<ol> <strong> <ul>. "
+ "Links are forbidden."
),
)
website = models.URLField(max_length=255, null=True, blank=True, verbose_name=_lazy("Website"))
twitter = models.CharField(
max_length=15,
null=True,
blank=True,
validators=[TwitterValidator],
verbose_name=_lazy("Twitter Username"),
)
community_mozilla_org = models.CharField(
max_length=255, default="", blank=True, verbose_name=_lazy("Community Portal Username")
)
people_mozilla_org = models.CharField(
max_length=255, blank=True, default="", verbose_name=_lazy("People Directory Username")
)
matrix_handle = models.CharField(
max_length=255, default="", blank=True, verbose_name=_lazy("Matrix Nickname")
)
timezone = TimeZoneField(
null=True, blank=True, default="US/Pacific", verbose_name=_lazy("Timezone")
)
country = models.CharField(
max_length=2, choices=COUNTRIES, null=True, blank=True, verbose_name=_lazy("Country")
)
# No city validation
city = models.CharField(max_length=255, null=True, blank=True, verbose_name=_lazy("City"))
locale = LocaleField(default=settings.LANGUAGE_CODE, verbose_name=_lazy("Preferred language"))
first_answer_email_sent = models.BooleanField(
default=False, help_text=_lazy("Has been sent a first answer contribution email.")
)
first_l10n_email_sent = models.BooleanField(
default=False, help_text=_lazy("Has been sent a first revision contribution email.")
)
involved_from = models.DateField(
null=True, blank=True, verbose_name=_lazy("Involved with Mozilla from")
)
csat_email_sent = models.DateField(
null=True,
blank=True,
verbose_name=_lazy("When the user was sent a community " "health survey"),
)
is_fxa_migrated = models.BooleanField(default=False)
fxa_uid = models.CharField(blank=True, null=True, unique=True, max_length=128)
fxa_avatar = models.URLField(max_length=512, blank=True, default="")
products = models.ManyToManyField(Product, related_name="subscribed_users")
fxa_password_change = models.DateTimeField(blank=True, null=True)
fxa_refresh_token = models.CharField(blank=True, default="", max_length=128)
zendesk_id = models.CharField(blank=True, default="", max_length=1024)
updated_column_name = "user__date_joined"
class Meta(object):
permissions = (
("view_karma_points", "Can view karma points"),
("deactivate_users", "Can deactivate users"),
)
def __str__(self):
try:
return str(self.user)
except Exception as exc:
return str("%d (%r)" % (self.pk, exc))
def get_absolute_url(self):
return reverse("users.profile", args=[self.user_id])
def clear(self):
"""Clears out the users profile"""
self.name = ""
self.public_email = False
self.avatar = None
self.bio = ""
self.website = ""
self.twitter = ""
self.community_mozilla_org = ""
self.people_mozilla_org = ""
self.matrix_handle = ""
self.city = ""
self.is_fxa_migrated = False
self.fxa_uid = ""
@property
def display_name(self):
return self.name if self.name else self.user.username
@classmethod
def get_serializer(cls, serializer_type="full"):
# Avoid circular import
from kitsune.users import api
if serializer_type == "full":
return api.ProfileSerializer
elif serializer_type == "fk":
return api.ProfileFKSerializer
else:
raise ValueError('Unknown serializer type "{}".'.format(serializer_type))
@property
def last_contribution_date(self):
"""Get the date of the user's last contribution."""
from kitsune.questions.models import Answer
from kitsune.wiki.models import Revision
dates = []
# Latest Support Forum answer:
try:
answer = Answer.objects.filter(creator=self.user).latest("created")
dates.append(answer.created)
except Answer.DoesNotExist:
pass
# Latest KB Revision edited:
try:
revision = Revision.objects.filter(creator=self.user).latest("created")
dates.append(revision.created)
except Revision.DoesNotExist:
pass
# Latest KB Revision reviewed:
try:
revision = Revision.objects.filter(reviewer=self.user).latest("reviewed")
# Old revisions don't have the reviewed date.
dates.append(revision.reviewed or revision.created)
except Revision.DoesNotExist:
pass
if len(dates) == 0:
return None
return max(dates)
@property
def settings(self):
return self.user.settings
@property
def answer_helpfulness(self):
# Avoid circular import
from kitsune.questions.models import AnswerVote
return AnswerVote.objects.filter(answer__creator=self.user, helpful=True).count()
@property
def is_subscriber(self):
return self.products.exists()
class Setting(ModelBase):
"""User specific value per setting"""
user = models.ForeignKey(
User, on_delete=models.CASCADE, verbose_name=_lazy("User"), related_name="settings"
)
name = models.CharField(max_length=100)
value = models.CharField(blank=True, max_length=60, verbose_name=_lazy("Value"))
class Meta(object):
unique_together = (("user", "name"),)
def __str__(self):
return "%s %s:%s" % (self.user, self.name, self.value or "[none]")
@classmethod
def get_for_user(cls, user, name):
from kitsune.users.forms import SettingsForm
form = SettingsForm()
if name not in list(form.fields.keys()):
raise KeyError(
("'{name}' is not a field in user.forms.SettingsFrom()").format(name=name)
)
try:
setting = Setting.objects.get(user=user, name=name)
except Setting.DoesNotExist:
value = form.fields[name].initial or ""
setting = Setting.objects.create(user=user, name=name, value=value)
# Cast to the field's Python type.
return form.fields[name].to_python(setting.value)
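# Added usage sketch (the setting name below is hypothetical, not from this
# file):
#   Setting.get_for_user(request.user, 'email_private_messages')
# returns the stored value cast through the form field's to_python(),
# creating the row with the form field's initial value on first access.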
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key used for
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts.
"""
user = models.ForeignKey(
User, on_delete=models.CASCADE, unique=True, verbose_name=_lazy("user")
)
activation_key = models.CharField(verbose_name=_lazy("activation key"), max_length=40)
class Meta:
verbose_name = _lazy("registration profile")
verbose_name_plural = _lazy("registration profiles")
def __str__(self):
return "Registration information for %s" % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by:
1. The date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
"""
return True
activation_key_expired.boolean = True
class EmailChange(models.Model):
"""Stores email with activation key when user requests a change."""
ACTIVATED = "ALREADY_ACTIVATED"
user = models.ForeignKey(
User, on_delete=models.CASCADE, unique=True, verbose_name=_lazy("user")
)
activation_key = models.CharField(verbose_name=_lazy("activation key"), max_length=40)
email = models.EmailField(db_index=True, null=True)
def __str__(self):
return "Change email request to %s for %s" % (self.email, self.user)
class Deactivation(models.Model):
"""Stores user deactivation logs."""
user = models.ForeignKey(
User, on_delete=models.CASCADE, verbose_name=_lazy("user"), related_name="+"
)
moderator = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name=_lazy("moderator"),
related_name="deactivations",
)
date = models.DateTimeField(default=datetime.now)
def __str__(self):
return "%s was deactivated by %s on %s" % (self.user, self.moderator, self.date)
class AccountEvent(models.Model):
"""Stores the events received from Firefox Accounts.
These events are processed by celery and the correct status is assigned in each entry.
"""
# Status of an event entry.
UNPROCESSED = 1
PROCESSED = 2
IGNORED = 3
NOT_IMPLEMENTED = 4
EVENT_STATUS = (
(UNPROCESSED, "unprocessed"),
(PROCESSED, "processed"),
(IGNORED, "ignored"),
(NOT_IMPLEMENTED, "not-implemented"),
)
PASSWORD_CHANGE = "password-change"
PROFILE_CHANGE = "profile-change"
SUBSCRIPTION_STATE_CHANGE = "subscription-state-change"
DELETE_USER = "delete-user"
status = models.PositiveSmallIntegerField(
choices=EVENT_STATUS, default=UNPROCESSED, blank=True
)
created_at = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
body = models.TextField(max_length=4096, blank=False)
event_type = models.CharField(max_length=256, default="", blank=True)
fxa_uid = models.CharField(max_length=128, default="", blank=True)
jwt_id = models.CharField(max_length=256)
issued_at = models.CharField(max_length=32)
profile = models.ForeignKey(
Profile, on_delete=models.CASCADE, related_name="account_events", null=True
)
class Meta(object):
ordering = ["-last_modified"]
| {
"content_hash": "5f089157ce01589a79080fed7b2185e6",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 99,
"avg_line_length": 34.312997347480106,
"alnum_prop": 0.641465677179963,
"repo_name": "mozilla/kitsune",
"id": "b0a41bf82c5c390f614d18dd0954b0386c9441d1",
"size": "12936",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "kitsune/users/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1156"
},
{
"name": "Dockerfile",
"bytes": "3027"
},
{
"name": "HTML",
"bytes": "535448"
},
{
"name": "JavaScript",
"bytes": "658477"
},
{
"name": "Jinja",
"bytes": "4837"
},
{
"name": "Makefile",
"bytes": "2193"
},
{
"name": "Nunjucks",
"bytes": "68656"
},
{
"name": "Python",
"bytes": "2827116"
},
{
"name": "SCSS",
"bytes": "240092"
},
{
"name": "Shell",
"bytes": "10759"
},
{
"name": "Svelte",
"bytes": "26864"
}
],
"symlink_target": ""
} |
cheetahVarStartToken = $
directiveStartToken = #
#end compiler-settings
# -*- coding: utf-8 -*-
# | {
"content_hash": "e00d2a8335ae994d1556eddfaecff61e",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 24,
"avg_line_length": 16.333333333333332,
"alnum_prop": 0.6938775510204082,
"repo_name": "mabotech/maboss.py",
"id": "faae1cc0c770d65929fb8c314a7f4a859f64c4a3",
"size": "117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maboss/webx/tools/templates/view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "14864"
},
{
"name": "JavaScript",
"bytes": "4950"
},
{
"name": "Lua",
"bytes": "683"
},
{
"name": "Python",
"bytes": "433923"
},
{
"name": "Shell",
"bytes": "667"
}
],
"symlink_target": ""
} |
from pprint import pprint
from django.contrib.auth.mixins import PermissionRequiredMixin
from fo2.connections import db_cursor_so
from base.views import O2BaseGetPostView
from cd.forms.add1palete import Add1PaleteForm
from cd.queries.palete import custom_add_palete
class Add1Palete(O2BaseGetPostView, PermissionRequiredMixin):
def __init__(self, *args, **kwargs):
super(Add1Palete, self).__init__(*args, **kwargs)
self.permission_required = 'cd.can_admin_pallet'
self.Form_class = Add1PaleteForm
self.form_class_has_initial = True
self.cleaned_data2self = True
self.template_name = 'cd/add1palete.html'
self.title_name = 'Adiciona Palete'
def mount_context(self):
cursor = db_cursor_so(self.request)
status, inseridos = custom_add_palete(cursor, prefix=self.tipo)
if status == "OK":
self.context['msg'] = f"Inserido palete {inseridos[0]}"
else:
self.context['msg'] = f"Erro <{status}>, inserindo palete {inseridos[0]}"
| {
"content_hash": "645da101d888f08da213f3e384c5d9d9",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 85,
"avg_line_length": 32.8125,
"alnum_prop": 0.6780952380952381,
"repo_name": "anselmobd/fo2",
"id": "e405ad00776acedaec484580c7d796df7563fba2",
"size": "1050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cd/views/add1palete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy as np
from numpy.lib.stride_tricks import as_strided
from functools import wraps, partial
################################
# distribution-form wrappers #
################################
from pylds.lds_messages import \
kalman_filter as _kalman_filter, \
rts_smoother as _rts_smoother, \
filter_and_sample as _filter_and_sample, \
kalman_filter_diagonal as _kalman_filter_diagonal, \
filter_and_sample_diagonal as _filter_and_sample_diagonal, \
filter_and_sample_randomwalk as _filter_and_sample_randomwalk, \
E_step as _E_step
def _ensure_ndim(X,T,ndim):
X = np.require(X,dtype=np.float64, requirements='C')
assert ndim-1 <= X.ndim <= ndim
if X.ndim == ndim:
assert X.shape[0] == T
return X
else:
return as_strided(X, shape=(T,)+X.shape, strides=(0,)+X.strides)
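# --- Added illustrative sketch (not part of the original file) ---
# The zero-stride trick above broadcasts one (n, n) array into a (T, n, n)
# "stack" without copying any memory:
#
#   A = np.eye(2)
#   AT = as_strided(A, shape=(3,) + A.shape, strides=(0,) + A.strides)
#   # AT.shape == (3, 2, 2) and every AT[i] views the same buffer as A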
def _argcheck(mu_init, sigma_init, A, B, sigma_states, C, D, sigma_obs, inputs, data):
T = data.shape[0]
A, B, sigma_states, C, D, sigma_obs = \
map(partial(_ensure_ndim, T=T, ndim=3),
[A, B, sigma_states, C, D, sigma_obs])
# Check that the inputs are C ordered and at least 1d
inputs = np.require(inputs, dtype=np.float64, requirements='C')
data = np.require(data, dtype=np.float64, requirements='C')
return mu_init, sigma_init, A, B, sigma_states, C, D, sigma_obs, inputs, data
def _argcheck_diag_sigma_obs(mu_init, sigma_init, A, B, sigma_states, C, D, sigma_obs, inputs, data):
T = data.shape[0]
A, B, sigma_states, C, D, = \
map(partial(_ensure_ndim, T=T, ndim=3),
[A, B, sigma_states, C, D])
sigma_obs = _ensure_ndim(sigma_obs, T=T, ndim=2)
inputs = np.require(inputs, dtype=np.float64, requirements='C')
data = np.require(data, dtype=np.float64, requirements='C')
return mu_init, sigma_init, A, B, sigma_states, C, D, sigma_obs, inputs, data
def _argcheck_randomwalk(mu_init, sigma_init, sigmasq_states, sigmasq_obs, data):
T = data.shape[0]
sigmasq_states, sigmasq_obs = \
map(partial(_ensure_ndim, T=T, ndim=2),
[sigmasq_states, sigmasq_obs])
data = np.require(data, dtype=np.float64, requirements='C')
return mu_init, sigma_init, sigmasq_states, sigmasq_obs, data
def _wrap(func, check):
@wraps(func)
def wrapped(*args, **kwargs):
return func(*check(*args,**kwargs))
return wrapped
kalman_filter = _wrap(_kalman_filter,_argcheck)
rts_smoother = _wrap(_rts_smoother,_argcheck)
filter_and_sample = _wrap(_filter_and_sample,_argcheck)
E_step = _wrap(_E_step,_argcheck)
kalman_filter_diagonal = _wrap(_kalman_filter_diagonal,_argcheck_diag_sigma_obs)
filter_and_sample_diagonal = _wrap(_filter_and_sample_diagonal,_argcheck_diag_sigma_obs)
filter_and_sample_randomwalk = _wrap(_filter_and_sample_randomwalk,_argcheck_randomwalk)
###############################
# information-form wrappers #
###############################
from pylds.lds_info_messages import \
kalman_info_filter as _kalman_info_filter, \
info_E_step as _info_E_step, \
info_sample as _info_sample
def _info_argcheck(J_init, h_init, log_Z_init,
J_pair_11, J_pair_21, J_pair_22, h_pair_1, h_pair_2, log_Z_pair,
J_node, h_node, log_Z_node):
T = h_node.shape[0]
assert np.isscalar(log_Z_init)
J_node = _ensure_ndim(J_node, T=T, ndim=3)
J_pair_11, J_pair_21, J_pair_22 = \
map(partial(_ensure_ndim, T=T-1, ndim=3),
[J_pair_11, J_pair_21, J_pair_22])
h_pair_1, h_pair_2 = \
map(partial(_ensure_ndim, T=T-1, ndim=2),
[h_pair_1, h_pair_2])
log_Z_pair = _ensure_ndim(log_Z_pair, T=T-1, ndim=1)
log_Z_node = _ensure_ndim(log_Z_node, T=T, ndim=1)
h_node = np.require(h_node, dtype=np.float64, requirements='C')
return J_init, h_init, log_Z_init, \
J_pair_11, J_pair_21, J_pair_22, h_pair_1, h_pair_2, log_Z_pair,\
J_node, h_node, log_Z_node
kalman_info_filter = _wrap(_kalman_info_filter, _info_argcheck)
info_E_step = _wrap(_info_E_step, _info_argcheck)
info_sample = _wrap(_info_sample, _info_argcheck)
| {
"content_hash": "975de28e55611653125a2076d918f16e",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 101,
"avg_line_length": 37.5045045045045,
"alnum_prop": 0.6202257987028585,
"repo_name": "mattjj/pylds",
"id": "a9d79b3fa59acaf1730cf2e0dcc7e3fb4ab992f5",
"size": "4163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylds/lds_messages_interface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "178517"
},
{
"name": "TeX",
"bytes": "24607"
}
],
"symlink_target": ""
} |
from binascii import hexlify
from struct import pack, unpack
import hashlib
import time
import sys
import traceback
import electrum
from electrum.bitcoin import EncodeBase58Check, DecodeBase58Check, TYPE_ADDRESS, int_to_hex, var_int
from electrum.i18n import _
from electrum.plugins import BasePlugin, hook
from electrum.keystore import Hardware_KeyStore, parse_xpubkey
from ..hw_wallet import HW_PluginBase
from electrum.util import format_satoshis_plain, print_error
try:
import hid
from btchip.btchipComm import HIDDongleHIDAPI, DongleWait
from btchip.btchip import btchip
from btchip.btchipUtils import compress_public_key,format_transaction, get_regular_input_script, get_p2sh_input_script
from btchip.bitcoinTransaction import bitcoinTransaction
from btchip.btchipFirmwareWizard import checkFirmware, updateFirmware
from btchip.btchipException import BTChipException
BTCHIP = True
BTCHIP_DEBUG = False
except ImportError:
BTCHIP = False
class Ledger_Client():
def __init__(self, hidDevice):
self.dongleObject = btchip(hidDevice)
self.preflightDone = False
def is_pairable(self):
return True
def close(self):
self.dongleObject.dongle.close()
def timeout(self, cutoff):
pass
def is_initialized(self):
return True
def label(self):
return ""
def i4b(self, x):
return pack('>I', x)
def get_xpub(self, bip32_path):
self.checkDevice()
# bip32_path is of the form 44'/0'/1'
# S-L-O-W - we don't handle the fingerprint directly, so compute
# it manually from the previous node
# This only happens once so it's bearable
#self.get_client() # prompt for the PIN before displaying the dialog if necessary
#self.handler.show_message("Computing master public key")
try:
splitPath = bip32_path.split('/')
if splitPath[0] == 'm':
splitPath = splitPath[1:]
bip32_path = bip32_path[2:]
fingerprint = 0
if len(splitPath) > 1:
prevPath = "/".join(splitPath[0:len(splitPath) - 1])
nodeData = self.dongleObject.getWalletPublicKey(prevPath)
publicKey = compress_public_key(nodeData['publicKey'])
h = hashlib.new('ripemd160')
h.update(hashlib.sha256(publicKey).digest())
fingerprint = unpack(">I", h.digest()[0:4])[0]
nodeData = self.dongleObject.getWalletPublicKey(bip32_path)
publicKey = compress_public_key(nodeData['publicKey'])
depth = len(splitPath)
lastChild = splitPath[len(splitPath) - 1].split('\'')
if len(lastChild) == 1:
childnum = int(lastChild[0])
else:
childnum = 0x80000000 | int(lastChild[0])
xpub = "0488B21E".decode('hex') + chr(depth) + self.i4b(fingerprint) + self.i4b(childnum) + str(nodeData['chainCode']) + str(publicKey)
        except Exception as e:
#self.give_error(e, True)
return None
finally:
#self.handler.clear_dialog()
pass
return EncodeBase58Check(xpub)
def has_detached_pin_support(self, client):
try:
client.getVerifyPinRemainingAttempts()
return True
        except BTChipException as e:
if e.sw == 0x6d00:
return False
raise e
def is_pin_validated(self, client):
try:
# Invalid SET OPERATION MODE to verify the PIN status
client.dongle.exchange(bytearray([0xe0, 0x26, 0x00, 0x00, 0x01, 0xAB]))
        except BTChipException as e:
if (e.sw == 0x6982):
return False
if (e.sw == 0x6A80):
return True
raise e
def perform_hw1_preflight(self):
try:
firmware = self.dongleObject.getFirmwareVersion()['version'].split(".")
if not checkFirmware(firmware):
self.dongleObject.dongle.close()
raise Exception("HW1 firmware version too old. Please update at https://www.ledgerwallet.com")
try:
self.dongleObject.getOperationMode()
            except BTChipException as e:
if (e.sw == 0x6985):
self.dongleObject.dongle.close()
self.handler.get_setup( )
# Acquire the new client on the next run
else:
raise e
            if self.has_detached_pin_support(self.dongleObject) and not self.is_pin_validated(self.dongleObject) and (self.handler is not None):
remaining_attempts = self.dongleObject.getVerifyPinRemainingAttempts()
                if remaining_attempts != 1:
msg = "Enter your Ledger PIN - remaining attempts : " + str(remaining_attempts)
else:
msg = "Enter your Ledger PIN - WARNING : LAST ATTEMPT. If the PIN is not correct, the dongle will be wiped."
confirmed, p, pin = self.password_dialog(msg)
if not confirmed:
raise Exception('Aborted by user - please unplug the dongle and plug it again before retrying')
pin = pin.encode()
self.dongleObject.verifyPin(pin)
        except BTChipException as e:
if (e.sw == 0x6faa):
raise Exception("Dongle is temporarily locked - please unplug it and replug it again")
if ((e.sw & 0xFFF0) == 0x63c0):
raise Exception("Invalid PIN - please unplug the dongle and plug it again before retrying")
raise e
def checkDevice(self):
if not self.preflightDone:
try:
self.perform_hw1_preflight()
except BTChipException as e:
if (e.sw == 0x6d00):
raise BaseException("Device not in Bitcoin mode")
raise e
self.preflightDone = True
def password_dialog(self, msg=None):
response = self.handler.get_word(msg)
if response is None:
return False, None, None
return True, response, response
class Ledger_KeyStore(Hardware_KeyStore):
hw_type = 'ledger'
device = 'Ledger'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
# Errors and other user interaction is done through the wallet's
# handler. The handler is per-window and preserved across
# device reconnects
self.force_watching_only = False
self.signing = False
self.cfg = d.get('cfg', {'mode':0,'pair':''})
def dump(self):
obj = Hardware_KeyStore.dump(self)
obj['cfg'] = self.cfg
return obj
def get_derivation(self):
return self.derivation
def get_client(self):
return self.plugin.get_client(self)
def give_error(self, message, clear_client = False):
print_error(message)
if not self.signing:
self.handler.show_error(message)
else:
self.signing = False
if clear_client:
self.client = None
raise Exception(message)
def address_id_stripped(self, address):
# Strip the leading "m/"
change, index = self.get_address_index(address)
derivation = self.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
return address_path[2:]
def decrypt_message(self, pubkey, message, password):
raise RuntimeError(_('Encryption and decryption are currently not supported for %s') % self.device)
def sign_message(self, sequence, message, password):
self.signing = True
# prompt for the PIN before displaying the dialog if necessary
client = self.get_client()
address_path = self.get_derivation()[2:] + "/%d/%d"%sequence
self.handler.show_message("Signing message ...")
try:
info = self.get_client().signMessagePrepare(address_path, message)
pin = ""
if info['confirmationNeeded']:
pin = self.handler.get_auth( info ) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning(_('Cancelled by user'))
pin = str(pin).encode()
signature = self.get_client().signMessageSign(pin)
        except BTChipException as e:
if e.sw == 0x6a80:
self.give_error("Unfortunately, this message cannot be signed by the Ledger wallet. Only alphanumerical messages shorter than 140 characters are supported. Please remove any extra characters (tab, carriage return) and retry.")
else:
self.give_error(e, True)
except UserWarning:
self.handler.show_error(_('Cancelled by user'))
return ''
        except Exception as e:
self.give_error(e, True)
finally:
self.handler.clear_dialog()
self.signing = False
# Parse the ASN.1 signature
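        # Added note: a DER-encoded ECDSA signature is laid out as
        #   30 <len> 02 <rLen> <r bytes> 02 <sLen> <s bytes>
        # so signature[3] holds rLen, r starts at offset 4, and a 33-byte
        # r or s carries a leading 0x00 pad byte that is stripped below.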
rLength = signature[3]
r = signature[4 : 4 + rLength]
sLength = signature[4 + rLength + 1]
s = signature[4 + rLength + 2:]
if rLength == 33:
r = r[1:]
if sLength == 33:
s = s[1:]
r = str(r)
s = str(s)
# And convert it
return chr(27 + 4 + (signature[0] & 0x01)) + r + s
def sign_transaction(self, tx, password):
if tx.is_complete():
return
client = self.get_client()
self.signing = True
inputs = []
inputsPaths = []
pubKeys = []
chipInputs = []
redeemScripts = []
signatures = []
preparedTrustedInputs = []
changePath = ""
changeAmount = None
output = None
outputAmount = None
p2shTransaction = False
reorganize = False
pin = ""
self.get_client() # prompt for the PIN before displaying the dialog if necessary
# Fetch inputs of the transaction to sign
derivations = self.get_tx_derivations(tx)
for txin in tx.inputs():
if txin.get('is_coinbase'):
self.give_error("Coinbase not supported") # should never happen
if len(txin['pubkeys']) > 1:
p2shTransaction = True
for i, x_pubkey in enumerate(txin['x_pubkeys']):
if x_pubkey in derivations:
signingPos = i
s = derivations.get(x_pubkey)
hwAddress = "%s/%d/%d" % (self.get_derivation()[2:], s[0], s[1])
break
else:
self.give_error("No matching x_key for sign_transaction") # should never happen
inputs.append([txin['prev_tx'].raw, txin['prevout_n'], txin.get('redeemScript'), txin['prevout_hash'], signingPos ])
inputsPaths.append(hwAddress)
pubKeys.append(txin['pubkeys'])
# Sanity check
if p2shTransaction:
for txinput in tx.inputs():
if len(txinput['pubkeys']) < 2:
self.give_error("P2SH / regular input mixed in same transaction not supported") # should never happen
txOutput = var_int(len(tx.outputs()))
for txout in tx.outputs():
output_type, addr, amount = txout
txOutput += int_to_hex(amount, 8)
script = tx.pay_script(output_type, addr)
txOutput += var_int(len(script)/2)
txOutput += script
txOutput = txOutput.decode('hex')
# Recognize outputs - only one output and one change is authorized
if not p2shTransaction:
if len(tx.outputs()) > 2: # should never happen
self.give_error("Transaction with more than 2 outputs not supported")
for _type, address, amount in tx.outputs():
assert _type == TYPE_ADDRESS
info = tx.output_info.get(address)
if info is not None:
index, xpubs, m = info
changePath = self.get_derivation()[2:] + "/%d/%d"%index
changeAmount = amount
else:
output = address
outputAmount = amount
self.handler.show_message(_("Confirm Transaction on your Ledger device..."))
try:
# Get trusted inputs from the original transactions
for utxo in inputs:
if not p2shTransaction:
txtmp = bitcoinTransaction(bytearray(utxo[0].decode('hex')))
chipInputs.append(self.get_client().getTrustedInput(txtmp, utxo[1]))
redeemScripts.append(txtmp.outputs[utxo[1]].script)
else:
tmp = utxo[3].decode('hex')[::-1].encode('hex')
tmp += int_to_hex(utxo[1], 4)
chipInputs.append({'value' : tmp.decode('hex')})
redeemScripts.append(bytearray(utxo[2].decode('hex')))
# Sign all inputs
firstTransaction = True
inputIndex = 0
rawTx = tx.serialize()
self.get_client().enableAlternate2fa(False)
while inputIndex < len(inputs):
self.get_client().startUntrustedTransaction(firstTransaction, inputIndex,
chipInputs, redeemScripts[inputIndex])
if not p2shTransaction:
outputData = self.get_client().finalizeInput(output, format_satoshis_plain(outputAmount),
format_satoshis_plain(tx.get_fee()), changePath, bytearray(rawTx.decode('hex')))
reorganize = True
else:
outputData = self.get_client().finalizeInputFull(txOutput)
outputData['outputData'] = txOutput
if firstTransaction:
transactionOutput = outputData['outputData']
if outputData['confirmationNeeded']:
outputData['address'] = output
self.handler.clear_dialog()
pin = self.handler.get_auth( outputData ) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning()
if pin != 'paired':
self.handler.show_message(_("Confirmed. Signing Transaction..."))
else:
# Sign input with the provided PIN
inputSignature = self.get_client().untrustedHashSign(inputsPaths[inputIndex], pin)
inputSignature[0] = 0x30 # force for 1.4.9+
signatures.append(inputSignature)
inputIndex = inputIndex + 1
if pin != 'paired':
firstTransaction = False
except UserWarning:
self.handler.show_error(_('Cancelled by user'))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.give_error(e, True)
finally:
self.handler.clear_dialog()
# Reformat transaction
inputIndex = 0
while inputIndex < len(inputs):
if p2shTransaction:
signaturesPack = [signatures[inputIndex]] * len(pubKeys[inputIndex])
inputScript = get_p2sh_input_script(redeemScripts[inputIndex], signaturesPack)
preparedTrustedInputs.append([ ("\x00" * 4) + chipInputs[inputIndex]['value'], inputScript ])
else:
inputScript = get_regular_input_script(signatures[inputIndex], pubKeys[inputIndex][0].decode('hex'))
preparedTrustedInputs.append([ chipInputs[inputIndex]['value'], inputScript ])
inputIndex = inputIndex + 1
updatedTransaction = format_transaction(transactionOutput, preparedTrustedInputs)
updatedTransaction = hexlify(updatedTransaction)
if reorganize:
tx.update(updatedTransaction)
else:
tx.update_signatures(updatedTransaction)
self.signing = False
class LedgerPlugin(HW_PluginBase):
libraries_available = BTCHIP
keystore_class = Ledger_KeyStore
client = None
DEVICE_IDS = [
(0x2581, 0x1807), # HW.1 legacy btchip
(0x2581, 0x2b7c), # HW.1 transitional production
(0x2581, 0x3b7c), # HW.1 ledger production
(0x2581, 0x4b7c), # HW.1 ledger test
(0x2c97, 0x0000), # Blue
(0x2c97, 0x0001) # Nano-S
]
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def btchip_is_connected(self, keystore):
try:
self.get_client(keystore).getFirmwareVersion()
except Exception as e:
return False
return True
def get_btchip_device(self, device):
ledger = False
if (device.product_key[0] == 0x2581 and device.product_key[1] == 0x3b7c) or (device.product_key[0] == 0x2581 and device.product_key[1] == 0x4b7c) or (device.product_key[0] == 0x2c97):
ledger = True
dev = hid.device()
dev.open_path(device.path)
dev.set_nonblocking(True)
return HIDDongleHIDAPI(dev, ledger, BTCHIP_DEBUG)
def create_client(self, device, handler):
self.handler = handler
client = self.get_btchip_device(device)
        if client is not None:
client = Ledger_Client(client)
return client
def setup_device(self, device_info, wizard):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
#client.handler = wizard
client.handler = self.create_handler(wizard)
#client.get_xpub('m')
client.get_xpub("m/44'/0'") # TODO replace by direct derivation once Nano S > 1.1
def get_xpub(self, device_id, derivation, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
#client.handler = wizard
client.handler = self.create_handler(wizard)
client.checkDevice()
xpub = client.get_xpub(derivation)
return xpub
def get_client(self, keystore, force_pair=True):
# All client interaction should not be in the main GUI thread
#assert self.main_thread != threading.current_thread()
devmgr = self.device_manager()
        handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
#if client:
# client.used()
        if client is not None:
client.checkDevice()
client = client.dongleObject
return client
| {
"content_hash": "468a463eecef7088e2b63a7436700809",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 242,
"avg_line_length": 40.66736842105263,
"alnum_prop": 0.5694983693120049,
"repo_name": "cryptapus/electrum-myr",
"id": "01f5361aabf7b1fd2de31dd42a67af51c4e3431f",
"size": "19317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/ledger/ledger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3867"
},
{
"name": "Makefile",
"bytes": "844"
},
{
"name": "NSIS",
"bytes": "6930"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Python",
"bytes": "1169374"
},
{
"name": "Shell",
"bytes": "7028"
}
],
"symlink_target": ""
} |
import pathmagic
import sys
from bottle import route
from bottle import request
from bottle import response
from Utils.Functions import jsonize
from bson.json_util import dumps
from Utils.Functions import clean_hash
from Utils.Functions import check_hashes
from Utils.Functions import change_date_to_str
from Utils.Functions import to_bool
from Utils.Functions import get_file_id
from Utils.Functions import add_error
from Utils.Functions import valid_hash
from PackageControl.PackageController import *
from MetaControl.MetaController import *
from VersionControl.VersionController import *
from process_hash import generic_process_hash
from virusTotalApi import get_av_result
from virusTotalApi import save_file_from_vt
from Utils.mailSender import send_mail
import datetime
from IPython import embed
import time
import logging
from Utils.task import save
from Utils.task import get_task
from Utils.task import add_task
from Utils.task import load_task
# envget is used by fix_inconsistency() below but was missing from the
# imports above; the module path here is assumed from the project layout.
from env import envget
@route('/api/v1/task', method='OPTIONS')
def enable_cors_for_task():
return 0
# Return true if the task has
# finished. False otherwise.
@route('/api/v1/task_finished', method='GET')
def task_finished():
task_id = request.query.get('task_id')
task = get_task(task_id)
return dumps({"has_finished": task.get('date_end') is not None})
@route('/api/v1/task', method='GET')
def api_get_task():
task_id = request.query.get('task_id')
return dumps(get_task(task_id))
@route('/api/v1/task', method='POST')
def task():
requested = {
'file_hash': request.forms.get('file_hash'),
'vt_av': to_bool(request.forms.get('vt_av')),
'vt_samples': to_bool(request.forms.get('vt_samples')),
'process': to_bool(request.forms.get('process')),
'email': request.forms.get('email'),
'document_name': request.forms.get('document_name'),
'ip': request.environ.get('REMOTE_ADDR')}
task_id = add_task(requested)
return dumps({"task_id": task_id})
# def generic_task(process, file_hash, vt_av, vt_samples, email, task_id,
# document_name="",ip="127.0.0.1"):
def generic_task(task_id):
response = load_task(task_id)
if response.get('date_end') is not None:
logging.error(
"Task already done. why was this on the queue? task_id=" + str(task_id))
return response
process = response['requested']['process']
file_hash = response['requested']['file_hash']
vt_av = response['requested']['vt_av']
vt_samples = response['requested']['vt_samples']
email = response['requested']['email']
document_name = response['requested'].get('document_name', '')
ip = response['requested']['ip']
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.info("task_id=" + str(task_id))
logging.info("response['requested']=" + str(response['requested']))
generic_count = 0
response = {}
response["date_start"] = datetime.datetime.now()
response["document_name"] = document_name
response["task_id"] = task_id
response["ip"] = ip
check_hashes_output = check_hashes(file_hash)
errors = check_hashes_output.get('errors')
for error in errors:
key = error.get('error')
value = error.get('error_message')
logging.error("errors (key=" + str(key) +
", value=" + str(value) + ")")
response = add_error(response, key, value)
hashes = check_hashes_output.get('hashes')
remove_dups_output = remove_dups(hashes)
# remove duplicated hashes
hashes = remove_dups_output.get('list')
response["duplicated_hashes"] = remove_dups_output.get('dups')
response["hashes"] = hashes
hash_dicts = []
mc = MetaController()
for x in hashes:
x_dict = {}
x_dict["original"] = x
x_dict["sha1"] = get_file_id(x)
if(x_dict["sha1"] is not None):
doc = mc.read(x_dict["sha1"])
if doc is not None and doc.get('hash') is not None:
if doc.get('hash').get('md5') is not None:
x_dict["md5"] = doc.get('hash').get('md5')
if doc.get('hash').get('sha2') is not None:
x_dict["sha2"] = doc.get('hash').get('sha2')
hash_dicts.append(x_dict)
response["duplicated_samples"] = []
for x in hash_dicts:
for y in hash_dicts:
if x.get('original') != y.get('original') and (
x.get('original') == y.get('sha1') or
x.get('original') == y.get('md5') or
x.get('original') == y.get('sha2')):
response["duplicated_samples"].append(y.get('original'))
hash_dicts.remove(y)
hashes = []
for x in hash_dicts:
hashes.append(x.get('original'))
response["hashes"] = hashes
if(len(hashes) == 0):
response = add_error(response, 6, "No valid hashes provided.")
response["date_end"] = datetime.datetime.now()
save(response)
return change_date_to_str(response)
save(response)
response["private_credits_spent"] = 0
response["inconsistencies"] = []
if(vt_samples or process):
for hash_id in hashes:
inconsistency_output = fix_inconsistency(hash_id)
if inconsistency_output.get('inconsistency'):
response["inconsistencies"].append(hash_id)
if inconsistency_output.get('credit_spent'):
response["private_credits_spent"] += 1
save(response)
response["not_found_on_vt"] = []
if vt_samples:
response["downloaded"] = []
for hash_id in hashes:
if(get_file_id(hash_id) is None or db_inconsistency(hash_id)):
logging.debug("task(): " + hash_id +
" was not found (get_file_id returned None). ")
generic_count += 1
if (generic_count % 20 == 0):
save(response)
output = save_file_from_vt(hash_id)
sha1 = output.get('hash')
if(output.get('status') == 'out_of_credits'):
request_successful = False
while not request_successful:
output = save_file_from_vt(hash_id)
if output.get('status') != 'out_of_credits':
request_successful = True
if(output.get('status') == 'added'):
response["downloaded"].append(hash_id)
# we need to process the sha1, and not the sha2 because
# the grid does not save the sha2.
generic_process_hash(sha1)
response["private_credits_spent"] += 1
elif(output.get('status') == 'inconsistency_found'):
response["private_credits_spent"] += 1
generic_process_hash(sha1)
elif(output.get('status') == 'not_found'):
response["not_found_on_vt"].append(hash_id)
else:
logging.error("task_id=" + str(task_id))
logging.error(str(output))
response = add_error(
response, 11, "Unknown error when downloading sample from VT.")
save(response)
save(response)
response["processed"] = []
response["not_found_for_processing"] = []
if process:
logging.debug("process=true")
for hash_id in hashes:
logging.debug("task: hash_id=" + str(hash_id))
process_start_time = datetime.datetime.now()
generic_count += 1
if (generic_count % 20 == 0):
save(response)
if(generic_process_hash(hash_id) == 0):
process_end_time = datetime.datetime.now()
response["processed"].append({"hash": hash_id,
"seconds": (process_end_time - process_start_time).seconds})
else:
response["not_found_for_processing"].append(hash_id)
save(response)
if vt_av:
response["vt_av_added"] = []
response["vt_av_out_of_credits"] = []
response["not_found_on_vt_av"] = []
response["vt_av_already_downloaded"] = []
response["public_credits_spent"] = 0
for hash_id in hashes:
sha1 = get_file_id(hash_id)
if(sha1 is not None):
av_result_output = get_av_result(sha1)
if (av_result_output.get('status') == 'out_of_credits'):
request_successful = False
count = 0
while not request_successful:
av_result_output = get_av_result(sha1)
count += 1
if av_result_output.get('status') != 'out_of_credits':
response["vt_av_out_of_credits"].append(hash_id)
response = add_error(response, 10, "Had to retried " + str(count) + " times in av_result(out_of_credits) for hash= " + str(
hash_id) + ". Is someone else using the same public key?")
request_successful = True
if(av_result_output.get('status') == "added"):
response["vt_av_added"].append(hash_id)
response["public_credits_spent"] += 1
elif(av_result_output.get('status') == "already_had_it"):
response["vt_av_already_downloaded"].append(hash_id)
elif(av_result_output.get('status') == 'error'):
response = add_error(
response, 9, "Error in av_result: " + str(av_result_output.get('error_message')))
elif(av_result_output.get('status') == 'not_found'):
response["not_found_on_vt_av"].append(hash_id)
else:
logging.error("task_id=" + str(task_id))
logging.error("unknown error in av_result: " +
str(hash_id) + " ; " + str(av_result_output))
response = add_error(
response, 12, "Unknown error in av_result()")
save(response)
if(bool(email)):
send_mail(email, "task done", str(response))
response["date_end"] = datetime.datetime.now()
save(response)
return response
# Fix db inconsistencies
# This can happen in old setups
def fix_inconsistency(file_hash):
status = db_inconsistency(file_hash)
if status == 1 or status == 3:
generic_process_hash(file_hash)
return {"inconsistency": True, "fixed": True, "credit_spent": False}
elif status == 2 and envget('spend_credit_to_fix_inconsistency'):
file_id = get_file_id(file_hash)
save_file_from_vt(file_id)
return {"inconsistency": True, "fixed": True, "credit_spent": True}
elif status == 2 and not envget('spend_credit_to_fix_inconsistency'):
return {"inconsistency": True, "fixed": False}
elif status == 0:
return {"inconsistency": False, "fixed": False}
# The DB is consistent if the
# file has sample, meta and version,
# or nothing. Is inconsistent otherwise.
# returns 0 if everything is ok
# returns 1 if hash has sample, but not meta
# returns 2 if hash has meta, but not sample
# returns 3 if hash has meta and sample, but not version
def db_inconsistency(file_hash):
if(not valid_hash(file_hash)):
raise ValueError("db_inconsistency invalid hash")
pc = PackageController()
v = VersionController()
file_id = get_file_id(file_hash)
if file_id is not None: # meta exists
file_bin = pc.getFile(file_id)
if file_bin is not None: # sample exists
version = v.searchVersion(file_id)
if version is not None:
return 0 # ok
else: # version does not exist
logging.info(
"inconsistency: meta and sample exists. Version does not")
return 3
else: # has meta but not sample
logging.info("inconsistency: meta exists, sample does not")
return 2
else: # does not have meta
if len(file_hash) == 64:
return 0 # cant search in grid by sha256
if len(file_hash) == 40:
file_bin = pc.getFile(file_hash)
else: # md5
sha1 = pc.md5_to_sha1(file_hash)
if sha1 is None:
return 0 # does not have meta or sample
            file_bin = pc.getFile(sha1)  # the grid is keyed by sha1
if file_bin is None:
return 0
else:
logging.info("inconsistency: does not have meta. has sample")
return 1
def remove_dups(biglist):
known_links = set()
newlist = []
dups = []
for d in biglist:
link = d
if link in known_links:
dups.append(link)
continue
newlist.append(d)
known_links.add(link)
biglist[:] = newlist
return {'list': biglist, 'dups': dups}
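# Added usage sketch (illustrative values):
#   remove_dups(['a', 'b', 'a'])  ->  {'list': ['a', 'b'], 'dups': ['a']}
# Note that the caller's list is also deduplicated in place via
# `biglist[:] = newlist`.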
| {
"content_hash": "0f3bc88a66a5470763cd06dd923583aa",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 151,
"avg_line_length": 39.92705167173252,
"alnum_prop": 0.5658495736906212,
"repo_name": "codexgigassys/codex-backend",
"id": "d06ccec86d2f567f8c54648fc40eaebafca91ef5",
"size": "13136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Api/task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2944"
},
{
"name": "Python",
"bytes": "292728"
},
{
"name": "Shell",
"bytes": "664"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
def __init__(self, plotly_name="colorscale", parent_name="choropleth", **kwargs):
super(ColorscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"autocolorscale": False}),
role=kwargs.pop("role", "style"),
**kwargs
)
| {
"content_hash": "5aa6ccc2e9c839fca17084fa54ab5301",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 41.53846153846154,
"alnum_prop": 0.6240740740740741,
"repo_name": "plotly/python-api",
"id": "ad663bfba380e78f96026e6b8119f84bb9fd2d1a",
"size": "540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/choropleth/_colorscale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import synthtool as s
from synthtool.languages import java
for library in s.get_staging_dirs():
# put any special-case replacements here
s.move(library)
s.remove_staging_dirs()
java.common_templates(
excludes=[
".kokoro/build.sh",
".kokoro/nightly/samples.cfg",
".kokoro/presubmit/samples.cfg",
".kokoro/presubmit/integration.cfg",
".kokoro/nightly/integration.cfg",
]
)
| {
"content_hash": "741b03552842280f7d0e420402d9c321",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 44,
"avg_line_length": 23.88888888888889,
"alnum_prop": 0.6604651162790698,
"repo_name": "googleapis/java-bigqueryconnection",
"id": "20a40a132d70a29cd4ece06d8fc59682ecd99f86",
"size": "1006",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "owlbot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "801"
},
{
"name": "Java",
"bytes": "1716796"
},
{
"name": "Python",
"bytes": "1006"
},
{
"name": "Shell",
"bytes": "20361"
}
],
"symlink_target": ""
} |
from django.views.generic import View
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.urls import reverse_lazy
from django.http import HttpResponseRedirect
# Local Django
from user.decorators import is_health_professional
from chat.models import Message
@method_decorator(login_required, name='dispatch')
@method_decorator(is_health_professional, name='dispatch')
class UnarchiveMessageHealthProfessionalView(View):
'''
View to unarchive messages.
'''
    def post(self, request, pk):
message = Message.objects.get(pk=pk)
message.is_active_health_professional = True
message.save()
return HttpResponseRedirect(reverse_lazy('archive_box_health_professional'))
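# Added sketch (URL path and name are assumed, not from this project): the
# `pk` argument must be supplied by the URLconf, e.g.
#
#   path('message/<int:pk>/unarchive/',
#        UnarchiveMessageHealthProfessionalView.as_view(),
#        name='unarchive_message_health_professional'),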
| {
"content_hash": "8e2ec3cb42177e647cac6e0df58fdae6",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 84,
"avg_line_length": 33.608695652173914,
"alnum_prop": 0.7658473479948253,
"repo_name": "fga-gpp-mds/2017.2-Receituario-Medico",
"id": "6e95386d615d05b7ff5b2fbc8319c50cbf67d2e0",
"size": "782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "medical_prescription/chat/views/unarchive_message_health_professional_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2123328"
},
{
"name": "CoffeeScript",
"bytes": "102158"
},
{
"name": "HTML",
"bytes": "2703462"
},
{
"name": "JavaScript",
"bytes": "7544427"
},
{
"name": "Makefile",
"bytes": "1369"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PowerShell",
"bytes": "471"
},
{
"name": "Python",
"bytes": "627321"
},
{
"name": "Ruby",
"bytes": "1030"
},
{
"name": "Shell",
"bytes": "3774"
}
],
"symlink_target": ""
} |
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.GL import glget
EXTENSION_NAME = 'GL_SGIS_point_line_texgen'
_p.unpack_constants( """GL_EYE_DISTANCE_TO_POINT_SGIS 0x81F0
GL_OBJECT_DISTANCE_TO_POINT_SGIS 0x81F1
GL_EYE_DISTANCE_TO_LINE_SGIS 0x81F2
GL_OBJECT_DISTANCE_TO_LINE_SGIS 0x81F3
GL_EYE_POINT_SGIS 0x81F4
GL_OBJECT_POINT_SGIS 0x81F5
GL_EYE_LINE_SGIS 0x81F6
GL_OBJECT_LINE_SGIS 0x81F7""", globals())
def glInitPointLineTexgenSGIS():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( EXTENSION_NAME )
| {
"content_hash": "665172925aee548240eeff8f7984d2e3",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 71,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.7742424242424243,
"repo_name": "frederica07/Dragon_Programming_Process",
"id": "2b447ed85c97d3ee25f329bb43a5a77594e390c7",
"size": "660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyOpenGL-3.0.2/OpenGL/raw/GL/SGIS/point_line_texgen.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1548"
},
{
"name": "Python",
"bytes": "2558317"
}
],
"symlink_target": ""
} |
from config import *
from rb import AMQP
import json
from optparse import OptionParser
if __name__ == "__main__":
opt_parse = OptionParser()
# opt_parse.add_option('-r', '--routing-key', dest = 'routing_key',
# help = "Routing key for message (ex: myalert.im)")
# opt_parse.add_option('-m', '--message', dest = 'message',
# help = "Message text")
opt_parse.add_option('-i', '--image-id', dest = 'image_id',
help = "Image id")
opt_parse.add_option('-u', '--user-id', dest = 'user_id',
help = "User id")
opt_parse.add_option('-p', '--image-path', dest = 'image_path',
help = "Image path")
args = opt_parse.parse_args()[0]
message = json.dumps({
'image_id': args.image_id,
'user_id': args.user_id,
'image_path': args.image_path
})
amqp = AMQP(USERNAME,PASSWORD,HOST,PORT,PATH)
amqp.connect()
amqp.send(exchange = EXCHANGE_NAME, _type = TOPIC,
content_type = "application/json", message = message, delivery_mode = 2)
amqp.close()
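    # Example payload published above (illustrative values):
    #   {"image_id": "42", "user_id": "7", "image_path": "/tmp/img.png"}
    # delivery_mode=2 marks the message as persistent on the broker.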
| {
"content_hash": "d75724a3f5286d819f476ef990618209",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 74,
"avg_line_length": 34.42857142857143,
"alnum_prop": 0.6369294605809128,
"repo_name": "enixdark/rabbitmq-py",
"id": "fce7c501c4d42748d2fa99e9a0e8298a6197124d",
"size": "964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ria/upload/upload_producer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5296"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0015_auto_20170404_1819'),
]
operations = [
migrations.CreateModel(
name='DataType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=100)),
('short_name', models.CharField(max_length=20)),
('unit', models.CharField(max_length=40)),
],
),
migrations.AddField(
model_name='sensor',
name='data_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='climate_data.DataType'),
),
]
| {
"content_hash": "6e09d384c64ce3bd5e6969c42d8cab02",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 120,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.5786407766990291,
"repo_name": "qubs/data-centre",
"id": "8f6be52637447ebfb7f9bf4f428b4f0d45dab701",
"size": "1103",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "climate_data/migrations/0016_auto_20170406_2219.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1881"
},
{
"name": "HTML",
"bytes": "3468"
},
{
"name": "Python",
"bytes": "88671"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from flask import Flask, g, render_template, render_template_string, abort
from flask_sqlalchemy import SQLAlchemy
from flask_perm import Perm
from flask_script import Manager
app = Flask(__name__)
manager = Manager(app)
db = SQLAlchemy()
perm = Perm()
app.config['DEBUG'] = True
app.config['SECRET_KEY'] = 'secret key'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/flask_perm.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['PERM_ADMIN_ECHO'] = True
db.app = app
db.init_app(app)
perm.app = app
perm.init_app(app)
perm.register_commands(manager)
class User(namedtuple('User', 'id nickname')):
pass
@app.before_request
def before_request():
g.user = User(**{'id': 1, 'nickname': 'user1'})
@perm.user_loader
def load_user(user_id):
return User(**{'id': user_id, 'nickname': 'user%d' % user_id})
@perm.users_loader
def load_users(filter_by, sort_field, sort_dir, offset, limit):
return [User(**{'id': id, 'nickname': 'user%d' % id}) for id in range(20)]
@perm.current_user_loader
def load_current_user():
return g.user
@app.errorhandler(perm.Denied)
def permission_denied(e):
return 'FORBIDDEN', 403
@app.route('/post/publish')
@perm.require_permission('post.publish')
def publish_post():
return 'Hey, you can publish post!'
@app.route('/post/publish/template')
def template_level_visible():
return render_template_string("""
{% if require_permission('post.publish') %}
Hey, you can publish post!
{% else %}
No, you can't see this.
{% endif %}
""")
if __name__ == '__main__':
"""
To create superadmin, run
$ python example.py perm create_superadmin your_admin_account
Please input password:
Please input password again:
Success!
To run server, run
$ python example.py runserver
"""
manager.run()
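# A quick manual check (a sketch; exact output varies by Flask version): a
# request lacking the 'post.publish' permission hits the errorhandler above.
#   $ curl -i http://127.0.0.1:5000/post/publish
#   HTTP/1.0 403 FORBIDDEN
#   FORBIDDEN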
| {
"content_hash": "76232afb82040d00847607d1f80f5d0c",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 78,
"avg_line_length": 25.472972972972972,
"alnum_prop": 0.6694960212201592,
"repo_name": "soasme/flask-perm",
"id": "a66263a6b6520af8e152461a48ddcab37a62c86a",
"size": "1909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1448"
},
{
"name": "JavaScript",
"bytes": "5557"
},
{
"name": "Makefile",
"bytes": "378"
},
{
"name": "Python",
"bytes": "60761"
}
],
"symlink_target": ""
} |
import itertools
import os
import sys
import pytest
from c7n.config import Config
from c7n.loader import PolicyLoader
from c7n.provider import clouds
from c7n.resources import load_resources
from c7n.schema import ElementSchema
from c7n.utils import yaml_load
from .common import BaseTest # NOQA - loads providers for individual module testing
def get_doc_examples(resources):
policies = []
seen = set()
for resource_name, v in resources.items():
for k, cls in itertools.chain(v.filter_registry.items(), v.action_registry.items()):
if cls in seen:
continue
seen.add(cls)
doc = ElementSchema.doc(cls)
if not doc:
continue
# split on yaml and new lines
split_doc = [x.split('\n\n') for x in doc.split('yaml')]
for item in itertools.chain.from_iterable(split_doc):
if 'policies:\n' in item:
policies.append((item, resource_name, cls.type))
elif 'resource:' in item:
item = 'policies:\n' + item
policies.append((item, resource_name, cls.type))
return policies
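# For illustration, a hypothetical element docstring this scraper targets:
# text following a ``yaml`` marker that contains ``policies:`` is collected
# as-is, and a bare ``resource:`` block gets a ``policies:`` header prepended.
#   :example:
#   .. code-block:: yaml
#       policies:
#         - name: stop-tagged-instances
#           resource: ec2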
def get_doc_policies(resources):
""" Retrieve all unique policies from the list of resources.
Duplicate policy is a policy that uses same name but has different set of
actions and/or filters.
Input a resource list.
Returns policies map (name->policy) and a list of duplicate policy names.
"""
policies = {}
duplicate_names = set()
for ptext, resource_name, el_name in get_doc_examples(resources):
try:
data = yaml_load(ptext)
except Exception:
print('failed %s %s\n %s' % (resource_name, el_name, ptext))
raise
for p in data.get('policies', []):
if p['name'] in policies:
if policies[p['name']] != p:
print('duplicate %s %s %s' % (
resource_name, el_name, p['name']))
duplicate_names.add(p['name'])
else:
policies[p['name']] = p
if duplicate_names:
        print('If you see this error, there are some policies with the same name but a '
              'different set of filters and/or actions.\n'
              'Please make sure you\'re using unique names for different policies.\n')
print('Duplicate policy names:')
for d in duplicate_names:
print('\t{0}'.format(d))
raise AssertionError("Duplication doc policy names")
return policies
skip_condition = not (
# Okay slightly gross, basically if we're explicitly told via
# env var to run doc tests do it.
(os.environ.get("C7N_TEST_DOC") in ('yes', 'true') or
# Or for ci to avoid some tox pain, we'll auto configure here
     # to run on the py3.6 test runner, as it's the only one
# without additional responsibilities.
(os.environ.get('C7N_TEST_RUN') and
sys.version_info.major == 3 and
sys.version_info.minor == 6)))
@pytest.mark.skipif(skip_condition, reason="Doc tests must be explicitly enabled with C7N_TEST_DOC")
@pytest.mark.parametrize("provider_name", ('aws', 'azure', 'gcp', 'k8s'))
def test_doc_examples(provider_name):
load_resources()
loader = PolicyLoader(Config.empty())
provider = clouds.get(provider_name)
policies = get_doc_policies(provider.resources)
for p in policies.values():
loader.load_data({'policies': [p]}, 'memory://')
for p in policies.values():
        # Note max name size here is 54 if it's a lambda policy, given
# our default prefix custodian- to stay under 64 char limit on
# lambda function names. This applies to AWS and GCP, and
# afaict Azure.
if len(p['name']) >= 54 and 'mode' in p:
raise ValueError(
"doc policy exceeds name limit policy:%s" % (p['name']))
| {
"content_hash": "c944a62693ec53e3176dfa8b1fae4b57",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 100,
"avg_line_length": 35.89090909090909,
"alnum_prop": 0.6066362715298885,
"repo_name": "capitalone/cloud-custodian",
"id": "28b406939ad8e088b91195ed2a4b6e6c75a9b0eb",
"size": "4070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_doc_examples.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2190"
},
{
"name": "Go",
"bytes": "135995"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9378"
},
{
"name": "Python",
"bytes": "3693572"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
} |
from flask import Flask, Blueprint
import random
from elasticsearch import Elasticsearch
from config import es_url, es_port, admin_username, admin_password, db_conn_str
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
app = Flask(__name__, static_folder='unicorn/static',
static_url_path='/unicorn/static')
app.secret_key = str(random.SystemRandom().random())
app.config['SQLALCHEMY_DATABASE_URI'] = db_conn_str
db = SQLAlchemy(app)
flask_bcrypt = Bcrypt(app)
login_manager = LoginManager()
login_manager.init_app(app)
from app.models import *
from app.admin_view import UserView,OrgView,AdminView
admin = Admin(app, index_view=AdminView())
admin.add_view(UserView(db.session))
admin.add_view(OrgView(db.session))
es = Elasticsearch(es_url, port=es_port)
from app import views
from app.views import uni
app.register_blueprint(uni)
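# The settings imported at the top are assumed to live in a local config.py;
# a minimal sketch (all values below are placeholders, not the real ones):
#   es_url = 'localhost'
#   es_port = 9200
#   admin_username = 'admin'
#   admin_password = 'change-me'
#   db_conn_str = 'postgresql://user:pass@localhost/unicorn'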
| {
"content_hash": "f3956951c50c343ca553b3ead616b636",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 29.529411764705884,
"alnum_prop": 0.7768924302788844,
"repo_name": "wgmueller1/unicorn",
"id": "8f2c3a777960a150e470ed4b7daa0cf7ec5a7ad6",
"size": "1004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "483067"
},
{
"name": "HTML",
"bytes": "4934113"
},
{
"name": "JavaScript",
"bytes": "7559289"
},
{
"name": "PHP",
"bytes": "26038"
},
{
"name": "Python",
"bytes": "93289"
},
{
"name": "Shell",
"bytes": "6636"
}
],
"symlink_target": ""
} |
"""
fabulous.utils
~~~~~~~~~~~~~~
Miscellaneous utilities for Fabulous.
"""
import os
import sys
import fcntl
import struct
import termios
import textwrap
import functools
from fabulous import grapefruit
def memoize(function):
"""A very simple memoize decorator to optimize pure-ish functions
Don't use this unless you've examined the code and see the
potential risks.
"""
cache = {}
@functools.wraps(function)
def _memoize(*args):
if args in cache:
return cache[args]
result = function(*args)
cache[args] = result
return result
    return _memoize
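# A minimal usage sketch: memoize only suits pure functions of hashable
# positional arguments, since results are keyed on the args tuple alone.
#   @memoize
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)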
class TerminalInfo(object):
"""Quick and easy access to some terminal information
    I'll tell you the terminal width/height and its background color.
You don't need to use me directly. Just access the global
:data:`term` instance::
>>> assert term.width > 0
>>> assert term.height > 0
It's important to know the background color when rendering PNG
images with semi-transparency. Because there's no way to detect
this, black will be the default::
>>> term.bgcolor
(0.0, 0.0, 0.0, 1.0)
>>> from fabulous import grapefruit
>>> isinstance(term.bgcolor, grapefruit.Color)
True
If you use a white terminal, you'll need to manually change this::
>>> term.bgcolor = 'white'
>>> term.bgcolor
(1.0, 1.0, 1.0, 1.0)
>>> term.bgcolor = grapefruit.Color.NewFromRgb(0.0, 0.0, 0.0, 1.0)
>>> term.bgcolor
(0.0, 0.0, 0.0, 1.0)
"""
def __init__(self, bgcolor='black'):
self.bgcolor = bgcolor
@property
def termfd(self):
"""Returns file descriptor number of terminal
This will look at all three standard i/o file descriptors and
return whichever one is actually a TTY in case you're
redirecting i/o through pipes.
"""
for fd in (2, 1, 0):
if os.isatty(fd):
return fd
raise Exception("No TTY could be found")
@property
def dimensions(self):
"""Returns terminal dimensions
Don't save this information for long periods of time because
the user might resize their terminal.
:return: Returns ``(width, height)``. If there's no terminal
to be found, we'll just return ``(79, 40)``.
"""
try:
call = fcntl.ioctl(self.termfd, termios.TIOCGWINSZ, "\000" * 8)
except IOError:
return (79, 40)
else:
height, width = struct.unpack("hhhh", call)[:2]
return (width, height)
@property
def width(self):
"""Returns width of terminal in characters
"""
return self.dimensions[0]
@property
def height(self):
"""Returns height of terminal in lines
"""
return self.dimensions[1]
def _get_bgcolor(self):
return self._bgcolor
def _set_bgcolor(self, color):
if isinstance(color, grapefruit.Color):
self._bgcolor = color
else:
self._bgcolor = grapefruit.Color.NewFromHtml(color)
bgcolor = property(_get_bgcolor, _set_bgcolor)
term = TerminalInfo()
def pil_check():
"""Check for PIL library, printing friendly error if not found
We need PIL for the :mod:`fabulous.text` and :mod:`fabulous.image`
modules to work. Because PIL can be very tricky to install, it's
not listed in the ``setup.py`` requirements list.
"""
try:
import PIL
except ImportError:
raise ImportError("Please install PIL to use this feature: "
"https://pillow.readthedocs.io/en/latest"
"/installation.html")
| {
"content_hash": "bdd0fbee9d92822ac8eb1ef95b278d6b",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 75,
"avg_line_length": 26.739436619718308,
"alnum_prop": 0.5962602054253358,
"repo_name": "jart/fabulous",
"id": "426775f2285a4366cf75bedb1b859cd0cb0c4935",
"size": "4403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabulous/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3629"
},
{
"name": "Python",
"bytes": "212835"
},
{
"name": "Shell",
"bytes": "1202"
}
],
"symlink_target": ""
} |