| hexsha (string, len 40) | size (int64, 3-1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 3-972) | max_stars_repo_name (string, len 6-130) | max_stars_repo_head_hexsha (string, len 40-78) | max_stars_repo_licenses (list, len 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 3-972) | max_issues_repo_name (string, len 6-130) | max_issues_repo_head_hexsha (string, len 40-78) | max_issues_repo_licenses (list, len 1-10) | max_issues_count (int64, 1-116k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 3-972) | max_forks_repo_name (string, len 6-130) | max_forks_repo_head_hexsha (string, len 40-78) | max_forks_repo_licenses (list, len 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 3-1.03M) | avg_line_length (float64, 1.13-941k) | max_line_length (int64, 2-941k) | alphanum_fraction (float64, 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| c094079c9c6b163067951cca25388c45cda78e53 | 584 | py | Python | Part_2/ch09_FileOprt/9_8_delBigFile.py | hyperpc/AutoStuffWithPython | e05f5e0acb5818d634e4ab84d640848cd4ae7e70 | ["MIT"] | null | null | null | Part_2/ch09_FileOprt/9_8_delBigFile.py | hyperpc/AutoStuffWithPython | e05f5e0acb5818d634e4ab84d640848cd4ae7e70 | ["MIT"] | null | null | null | Part_2/ch09_FileOprt/9_8_delBigFile.py | hyperpc/AutoStuffWithPython | e05f5e0acb5818d634e4ab84d640848cd4ae7e70 | ["MIT"] | null | null | null |
import os, re
def delBigFile(src):
filelist = os.listdir(src)
zipfileRegex = re.compile(r'^.*\.zip$')  # match any path ending in .zip
for filename in filelist:
srcfilepath = os.path.join(src, filename)
if os.path.isdir(srcfilepath):
delBigFile(srcfilepath)
else:
#zipMatches = zipfileRegex.search(srcfilepath)
#if zipMatches != None:
# filesize = os.path.getsize(srcfilepath)
filesize = os.path.getsize(srcfilepath)
print('>> Size of ' + srcfilepath + ': ' + str(filesize))
delBigFile('D:\\pyAuto')
| 32.444444 | 69 | 0.583904 |
| 0423b7e42c10604e645a202c26ff5817c3f0f76c | 1,370 | py | Python | awacs/states.py | chizou/awacs | 335c545d13ea22488b318245af891eb427c139db | ["BSD-2-Clause"] | null | null | null | awacs/states.py | chizou/awacs | 335c545d13ea22488b318245af891eb427c139db | ["BSD-2-Clause"] | null | null | null | awacs/states.py | chizou/awacs | 335c545d13ea22488b318245af891eb427c139db | ["BSD-2-Clause"] | null | null | null |
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
service_name = 'AWS Step Functions'
prefix = 'states'
class Action(BaseAction):
def __init__(self, action=None):
sup = super(Action, self)
sup.__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource='', region='', account=''):
sup = super(ARN, self)
sup.__init__(service=prefix, resource=resource, region=region,
account=account)
CreateActivity = Action('CreateActivity')
CreateStateMachine = Action('CreateStateMachine')
DeleteActivity = Action('DeleteActivity')
DeleteStateMachine = Action('DeleteStateMachine')
DescribeActivity = Action('DescribeActivity')
DescribeExecution = Action('DescribeExecution')
DescribeStateMachine = Action('DescribeStateMachine')
GetActivityTask = Action('GetActivityTask')
GetExecutionHistory = Action('GetExecutionHistory')
ListActivities = Action('ListActivities')
ListExecutions = Action('ListExecutions')
ListStateMachines = Action('ListStateMachines')
SendTaskFailure = Action('SendTaskFailure')
SendTaskHeartbeat = Action('SendTaskHeartbeat')
SendTaskSuccess = Action('SendTaskSuccess')
StartExecution = Action('StartExecution')
StopExecution = Action('StopExecution')
| 31.860465 | 70 | 0.754745 |
| e5c1289876bedc1492ce6b103c10866e937cdaf5 | 718 | py | Python | setup.py | jolespin/genopype | 68c8fa044a85bc9b22d219bdc2d2a88a22be11c0 | ["BSD-3-Clause-Clear", "BSD-3-Clause"] | null | null | null | setup.py | jolespin/genopype | 68c8fa044a85bc9b22d219bdc2d2a88a22be11c0 | ["BSD-3-Clause-Clear", "BSD-3-Clause"] | null | null | null | setup.py | jolespin/genopype | 68c8fa044a85bc9b22d219bdc2d2a88a22be11c0 | ["BSD-3-Clause-Clear", "BSD-3-Clause"] | null | null | null |
from setuptools import setup
# Version
version = None
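# The version string is read out of genopype/__init__.py by text parsing, so the package is never imported at build time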
with open("./genopype/__init__.py", "r") as f:
for line in f.readlines():
line = line.strip()
if line.startswith("__version__"):
version = line.split("=")[-1].strip().strip('"')
assert version is not None, "Check version in genopype/__init__.py"
setup(name='genopype',
version=version,
description='Architecture for building pipelines',
url='https://github.com/jolespin/genopype',
author='Josh L. Espinoza',
author_email='jespinoz@jcvi.org',
license='BSD-3',
packages=["genopype"],
install_requires=[
"pathlib2",
"scandir",
"soothsayer_utils",
],
)
| 27.615385 | 67 | 0.623955 |
| 7e2a061f8a0a2de677ae7a2da347e7019074bd08 | 1,578 | py | Python | func/user.py | liaojason2/foodmeow | aa48976d5c830034a327910144f36a7eb69ff86a | ["MIT"] | null | null | null | func/user.py | liaojason2/foodmeow | aa48976d5c830034a327910144f36a7eb69ff86a | ["MIT"] | null | null | null | func/user.py | liaojason2/foodmeow | aa48976d5c830034a327910144f36a7eb69ff86a | ["MIT"] | null | null | null |
import os
from datetime import datetime
from dotenv import load_dotenv
from pymongo import MongoClient
#from bson.objectid import ObjectId
load_dotenv()
conn = MongoClient(os.getenv("MONGODB_CONNECTION"))
db = conn.foodmeow
users = db.users
def checkUserExist(profile):
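# Create the user document on first contact (status "free", empty tempData); returns "NewUser" only when a new record was inserted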
userId = profile.user_id
displayName = profile.display_name
user = users.find_one({
"userId": userId
})
if(user is None):
users.insert_one({
"userId": userId,
"displayName": displayName,
"status": "free",
"tempData": "",
})
return "NewUser"
def checkUserStatus(userId):
user = users.find_one({
"userId": userId,
})
return user['status']
def changeUserStatus(userId, status):
users.update_one({
"userId": userId,
},
{
'$set': {
"status": status,
}
})
def updateTempData(userId, data):
user = users.find_one({
"userId": userId,
})
if user['tempData'] != "":
data = user['tempData'] + " " + data
users.update_one({
"userId": userId,
},
{
'$set': {
"tempData": data,
}
})
def getTempData(userId):
user = users.find_one({
"userId": userId,
})
return user['tempData']
def deleteTempData(userId):
users.find_one_and_update({
"userId": userId,
},
{
'$set': {
"tempData": "",
}
})
def clearDataToDefault(userId):
deleteTempData(userId)
changeUserStatus(userId, "free")
| 20.493506 | 51 | 0.555133 |
| 3d259e0bc9510b0a86e1282821b3db3953210ed5 | 4,967 | py | Python | letterboxed.py | pmclaugh/LetterBoxedNYT | fc8f10a7bc18484e4aa4a3112204b1f4354c7090 | ["MIT"] | null | null | null | letterboxed.py | pmclaugh/LetterBoxedNYT | fc8f10a7bc18484e4aa4a3112204b1f4354c7090 | ["MIT"] | null | null | null | letterboxed.py | pmclaugh/LetterBoxedNYT | fc8f10a7bc18484e4aa4a3112204b1f4354c7090 | ["MIT"] | null | null | null |
import argparse
from typing import List, Set, Union
from collections import defaultdict
from utils import timed
class WordTrieNode:
def __init__(self, value: str, parent: Union['WordTrieNode', None]):
self.value = value
self.parent = parent
self.children = {}
self.valid = False
def get_word(self) -> str:
if self.parent is not None:
return self.parent.get_word() + self.value
else:
return self.value
class LetterBoxed:
@timed
def __init__(self, input_string: str, dictionary: str, len_threshold=3):
# parse the input string (abc-def-ghi-jkl) into set of 4 sides
self.input_string = input_string.lower()
self.sides = {side for side in self.input_string.split('-')}
self.puzzle_letters = {letter for side in self.sides for letter in side}
self.len_threshold = len_threshold
# build trie from newline-delimited .txt word list
self.root = WordTrieNode('', None)
with open(dictionary) as f:
for line in f.readlines():
self.add_word(line.strip().lower())
# find all valid words in puzzle
self.puzzle_words = self.get_puzzle_words()
# puzzle_graph[starting_letter][ending_letter] = {{letters}: [words]}
# e.g. puzzle_graph['f']['s'] = {{'a','e','f','r','s'} : ['fares', 'fears', 'farers', 'fearers']}
self.puzzle_graph = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for word in self.puzzle_words:
self.puzzle_graph[word[0]][word[-1]][frozenset(word)].append(word)
def add_word(self, word) -> None:
node = self.root
for char in word:
if char not in node.children:
node.children[char] = WordTrieNode(char, node)
node = node.children[char]
node.valid = True
def _puzzle_words_inner(self, node: WordTrieNode, last_side: str) -> List[WordTrieNode]:
valid_nodes = [node] if node.valid else []
if node.children:
for next_side in self.sides - {last_side}:
for next_letter in next_side:
if next_letter in node.children:
next_node = node.children[next_letter]
valid_nodes += self._puzzle_words_inner(next_node, next_side)
return valid_nodes
@timed
def get_puzzle_words(self) -> List[str]:
all_valid_nodes = []
for starting_side in self.sides:
for starting_letter in starting_side:
if starting_letter in self.root.children:
all_valid_nodes += self._puzzle_words_inner(self.root.children[starting_letter], starting_side)
return [node.get_word() for node in all_valid_nodes]
def _find_solutions_inner(self, path_words: List[List[str]], letters: Set[str], next_letter: str) -> List[List[List[str]]]:
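# Depth-first extension of a solution path: succeed once all 12 puzzle letters are covered, abandon the path once it reaches len_threshold words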
if len(letters) == 12:
return [path_words]
elif len(path_words) == self.len_threshold:
return []
solutions = []
for last_letter in self.puzzle_graph[next_letter]:
for letter_edge, edge_words in self.puzzle_graph[next_letter][last_letter].items():
if letter_edge - letters:
solutions += self._find_solutions_inner(path_words + [edge_words], letters | letter_edge, last_letter)
return solutions
@timed
def find_all_solutions(self) -> List[List[str]]:
all_solutions = []
for first_letter in self.puzzle_letters:
for last_letter in self.puzzle_letters:
for letter_edge, edge_words in self.puzzle_graph[first_letter][last_letter].items():
all_solutions += self._find_solutions_inner([edge_words], letter_edge, last_letter)
return all_solutions
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--puzzle', default='mrf-sna-opu-gci', type=str, help='puzzle input in abc-def-ghi-jkl format')
parser.add_argument('--dict', default='words.txt', type=str, help='path to newline-delimited text file of valid words')
parser.add_argument('--len', default=3, type=int, help='maximum length, in words, of solutions')
args = parser.parse_args()
print("solving puzzle", args.puzzle)
puzzle = LetterBoxed(args.puzzle, args.dict, len_threshold=args.len)
print(len(puzzle.puzzle_words), "valid words found")
meta_solutions = puzzle.find_all_solutions()
print(len(meta_solutions), "meta-solutions (meaningfully distinct paths)")
full_count = 0
for meta_solution in meta_solutions:
count = 1
for element in meta_solution:
count *= len(element)
full_count += count
print(full_count, "total solutions (unique combinations/orders of words)")
| 43.191304 | 128 | 0.625126 |
| 88c496fa0694124229299841dd0e2edff066988a | 3,766 | py | Python | src/features/drought/build_features.py | IPL-UV/gauss4eo | 8aa5c6faa6ff2a43d8c026383cc51643b97601e4 | ["MIT"] | 1 | 2021-03-07T00:30:41.000Z | 2021-03-07T00:30:41.000Z | src/features/drought/build_features.py | IPL-UV/gauss4eo | 8aa5c6faa6ff2a43d8c026383cc51643b97601e4 | ["MIT"] | null | null | null | src/features/drought/build_features.py | IPL-UV/gauss4eo | 8aa5c6faa6ff2a43d8c026383cc51643b97601e4 | ["MIT"] | null | null | null |
import sys, os
sys.path.insert(0, "/home/emmanuel/code/py_esdc")
from functools import reduce
from esdc.shape import ShapeFileExtract, rasterize
from esdc.preprocessing import calculate_monthly_mean
from esdc.transform import DensityCubes
import xarray as xr
import pandas as pd
import shapely
from typing import Union, List, Tuple
SHAPEFILES = "/media/disk/databases/SHAPEFILES/shape_files_us_states/"
EMDATSHAPE = "/media/disk/databases/SMADI/EMDAT_validation/"
def get_cali_geometry():
# initialize shapefile extractor
shapefiles_clf = ShapeFileExtract()
# extract shapefiles
shapefiles_clf.import_shape_files(SHAPEFILES)
# extract california
query = "name"
subqueries = ["California"]
# get geometries
cali_geoms = shapefiles_clf.extract_polygons(query=query, subqueries=subqueries)
return cali_geoms
def mask_datacube(
ds: Union[xr.DataArray, xr.Dataset],
geometry: shapely.geometry.multipolygon.MultiPolygon,
) -> Union[xr.Dataset, xr.DataArray]:
# mask dataset
ds["cali_mask"] = rasterize(geometry, ds)
ds = ds.where(ds["cali_mask"] == 1, drop=True)
return ds
def smooth_vod_signal(
ds: Union[xr.DataArray, xr.Dataset], window_length=2, center=True
) -> Union[xr.DataArray, xr.Dataset]:
ds["VOD"] = ds.VOD.rolling(time=window_length, center=center).mean()
return ds
def remove_climatology(
ds: Union[xr.DataArray, xr.Dataset]
) -> Union[xr.DataArray, xr.Dataset]:
# calculate the climatology
ds_mean = calculate_monthly_mean(ds)
# remove climatology
ds = ds.groupby("time.month") - ds_mean
return ds, ds_mean
def get_cali_emdata():
shapefiles_clf = ShapeFileExtract()
shapefiles_clf.import_shape_files(EMDATSHAPE)
# Extract Europe
query = "LOCATION"
subqueries = ["California"]
cali_droughts = shapefiles_clf.extract_queries(query=query, subqueries=subqueries)
return cali_droughts
def get_drought_years(
ds: Union[xr.DataArray, xr.Dataset], years=List[str]
) -> Union[xr.Dataset, xr.DataArray]:
ds = xr.concat([ds.sel(time=slice(iyear, iyear)) for iyear in years], dim="time")
return ds
def get_density_cubes(
ds: Union[xr.Dataset, xr.DataArray], spatial: int = 1, temporal: int = 12
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
# initialize density cubes
minicuber = DensityCubes(spatial_window=spatial, time_window=temporal)
# initialize dataframes
drought_VOD = pd.DataFrame()
drought_LST = pd.DataFrame()
drought_NDVI = pd.DataFrame()
drought_SM = pd.DataFrame()
# Group by year and get minicubes
for iyear, igroup in ds.groupby("time.year"):
# get minicubes for variables
drought_VOD = drought_VOD.append(minicuber.get_minicubes(igroup.VOD))
drought_LST = drought_LST.append(minicuber.get_minicubes(igroup.LST))
drought_NDVI = drought_NDVI.append(minicuber.get_minicubes(igroup.NDVI))
drought_SM = drought_SM.append(minicuber.get_minicubes(igroup.SM))
return drought_VOD, drought_LST, drought_NDVI, drought_SM
def normalize(X: pd.DataFrame) -> pd.DataFrame:
X_mean, X_std = X.mean(axis=0), X.std(axis=0)
return (X - X_mean) / X_std
def get_common_elements(
X1: pd.DataFrame, X2: pd.DataFrame
) -> Tuple[pd.DataFrame, pd.DataFrame]:
idx = X1.index.intersection(X2.index)
return X1.loc[idx], X2.loc[idx]
def get_common_elements_many(dfs: List[pd.DataFrame]) -> List[pd.DataFrame]:
# get common element index
idx = pd.concat(dfs, axis=1, join="inner").index
# get subset elements that are common
dfs = [df.loc[idx] for df in dfs]
return dfs
def get_demo_pixel(ds, pixel: Tuple[float, float] = (-121, 37)):
return None
| 27.093525 | 86 | 0.714551 |
| f9dd64afa958f47933ed01f28422bc1b87614d2b | 2,197 | py | Python | health/views/incident.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | ["MIT"] | null | null | null | health/views/incident.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | ["MIT"] | null | null | null | health/views/incident.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | ["MIT"] | null | null | null |
from django.utils.translation import gettext_lazy as _
from rusel.base.views import BaseListView, BaseDetailView
from health.forms.incident import CreateForm, EditForm
from task.const import ROLE_INCIDENT, ROLE_APP
from task.models import Task, Urls
from health.config import app_config
from rusel.categories import get_categories_list
role = ROLE_INCIDENT
app = ROLE_APP[role]
class TuneData:
def tune_dataset(self, data, group):
return data
class ListView(BaseListView, TuneData):
model = Task
form_class = CreateForm
def __init__(self, *args, **kwargs):
super().__init__(app_config, role, *args, **kwargs)
class DetailView(BaseDetailView, TuneData):
model = Task
form_class = EditForm
def __init__(self, *args, **kwargs):
super().__init__(app_config, role, *args, **kwargs)
def form_valid(self, form):
response = super().form_valid(form)
form.instance.set_item_attr(app, get_info(form.instance))
return response
def get_info(item):
attr = []
if item.start:
attr.append({'text': '{} {}'.format(_('from'), item.start.strftime('%d.%m.%Y'))})
if item.stop:
attr.append({'text': '{} {}'.format(_('to'), item.stop.strftime('%d.%m.%Y'))})
if item.diagnosis:
attr.append({'text': item.diagnosis})
links = len(Urls.objects.filter(task=item.id)) > 0
files = (len(item.get_files_list(role)) > 0)
if item.info or links or files:
if (len(attr) > 0):
attr.append({'icon': 'separator'})
if links:
attr.append({'icon': 'url'})
if files:
attr.append({'icon': 'attach'})
if item.info:
info_descr = item.info[:80]
if len(item.info) > 80:
info_descr += '...'
attr.append({'icon': 'notes', 'text': info_descr})
if item.categories:
if (len(attr) > 0):
attr.append({'icon': 'separator'})
categs = get_categories_list(item.categories)
for categ in categs:
attr.append({'icon': 'category', 'text': categ.name, 'color': 'category-design-' + categ.design})
ret = {'attr': attr}
return ret
| 30.09589 | 109 | 0.615385 |
| b1dd44df4b8716f96285694fe4286b10cef28553 | 6,351 | py | Python | _django/queries_restfw.py | zzzeek/imdbench | 4f95aa769e90bfbada84ecfe8a9478d895260b44 | ["Apache-2.0"] | 50 | 2022-03-23T00:34:34.000Z | 2022-03-31T01:50:09.000Z | _django/queries_restfw.py | zzzeek/imdbench | 4f95aa769e90bfbada84ecfe8a9478d895260b44 | ["Apache-2.0"] | 11 | 2022-03-23T18:18:18.000Z | 2022-03-30T21:50:03.000Z | _django/queries_restfw.py | zzzeek/imdbench | 4f95aa769e90bfbada84ecfe8a9478d895260b44 | ["Apache-2.0"] | 5 | 2022-03-25T17:10:20.000Z | 2022-03-28T18:24:02.000Z |
#
# Copyright (c) 2019 MagicStack Inc.
# All rights reserved.
#
# See LICENSE for details.
##
from django.db import connection
from django.test.client import RequestFactory
import json
import random
from . import bootstrap # NoQA
from . import models
from . import views
rf = RequestFactory()
DUMMY_REQUEST = rf.get('/')
USER_VIEW = views.UserDetailsViewSet.as_view({'get': 'retrieve'})
MOVIE_VIEW = views.MovieDetailsViewSet.as_view({'get': 'retrieve'})
PERSON_VIEW = views.PersonDetailsViewSet.as_view({'get': 'retrieve'})
MOVIE_UPDATE_VIEW = views.MovieUpdateViewSet.as_view({'post': 'update'})
USER_INSERT_VIEW = views.UserInsertViewSet.as_view({'post': 'create'})
INSERT_PREFIX = 'insert_test__'
def init(ctx):
from django.conf import settings
settings.DATABASES["default"]["HOST"] = ctx.db_host
def connect(ctx):
# Django fully abstracts away connection management, so we
# rely on it to create a new connection for every benchmark
# thread.
return None
def close(ctx, db):
return
def load_ids(ctx, db):
users = models.User.objects.raw('''
SELECT * FROM _django_user ORDER BY random() LIMIT %s
''', [ctx.number_of_ids])
movies = models.Movie.objects.raw('''
SELECT * FROM _django_movie ORDER BY random() LIMIT %s
''', [ctx.number_of_ids])
people = models.Person.objects.raw('''
SELECT * FROM _django_person ORDER BY random() LIMIT %s
''', [ctx.number_of_ids])
return dict(
get_user=[d.id for d in users],
get_movie=[d.id for d in movies],
get_person=[d.id for d in people],
# re-use user IDs for update tests
update_movie=[d.id for d in movies],
# generate as many insert stubs as "concurrency" to
# accommodate concurrent inserts
insert_user=[INSERT_PREFIX] * ctx.concurrency,
insert_movie=[{
'prefix': INSERT_PREFIX,
'people': [p.id for p in people[:4]],
}] * ctx.concurrency,
insert_movie_plus=[INSERT_PREFIX] * ctx.concurrency,
)
def get_user(conn, id):
return USER_VIEW(DUMMY_REQUEST, pk=id).render().getvalue()
def get_movie(conn, id):
return MOVIE_VIEW(DUMMY_REQUEST, pk=id).render().getvalue()
def get_person(conn, id):
return PERSON_VIEW(DUMMY_REQUEST, pk=id).render().getvalue()
def update_movie(conn, id):
return MOVIE_UPDATE_VIEW(
rf.post('/', data={'title': f'{id}'}),
pk=id
).render().getvalue()
def insert_user(conn, val):
num = random.randrange(1_000_000)
return USER_INSERT_VIEW(
rf.post(
'/',
data={'name': f'{val}{num}', 'image': f'{val}image{num}'}
)
).render().getvalue()
def insert_movie(conn, val):
# copied from plain Django test, because it appears that the
# nested insert would be customized similar to this anyway
num = random.randrange(1_000_000)
people = models.Person.objects.filter(pk__in=val['people']).all()
movie = models.Movie.objects.create(
title=f'{val["prefix"]}{num}',
image=f'{val["prefix"]}image{num}.jpeg',
description=f'{val["prefix"]}description{num}',
year=num,
)
movie.directors.set(people[:1])
movie.cast.set(people[1:])
movie.save()
return json.dumps(views.CustomMovieView.render(None, movie))
def insert_movie_plus(conn, val):
# copied from plain Django test, because it appears that the
# nested insert would be customized similar to this anyway
num = random.randrange(1_000_000)
director = models.Person.objects.create(
first_name=f'{val}Alice',
last_name=f'{val}Director',
image=f'{val}image{num}.jpeg',
bio='',
)
c0 = models.Person.objects.create(
first_name=f'{val}Billie',
last_name=f'{val}Actor',
image=f'{val}image{num+1}.jpeg',
bio='',
)
c1 = models.Person.objects.create(
first_name=f'{val}Cameron',
last_name=f'{val}Actor',
image=f'{val}image{num+2}.jpeg',
bio='',
)
movie = models.Movie.objects.create(
title=f'{val}{num}',
image=f'{val}image{num}.jpeg',
description=f'{val}description{num}',
year=num,
)
movie.directors.set([director])
movie.cast.set([c0, c1])
movie.save()
return json.dumps(views.CustomMovieView.render(None, movie))
def setup(ctx, conn, queryname):
if queryname == 'update_movie':
with connection.cursor() as cur:
cur.execute('''
UPDATE
_django_movie
SET
title = split_part(_django_movie.title, '---', 1)
WHERE
_django_movie.title LIKE '%---%';
''')
elif queryname == 'insert_user':
with connection.cursor() as cur:
cur.execute('''
DELETE FROM
_django_user
WHERE
_django_user.name LIKE %s
''', [f'{INSERT_PREFIX}%'])
elif queryname in {'insert_movie', 'insert_movie_plus'}:
with connection.cursor() as cur:
cur.execute('''
DELETE FROM
"_django_directors" as D
USING
"_django_movie" as M
WHERE
D.movie_id = M.id AND M.image LIKE %s;
''', [f'{INSERT_PREFIX}%'])
cur.execute('''
DELETE FROM
"_django_cast" as C
USING
"_django_movie" as M
WHERE
C.movie_id = M.id AND M.image LIKE %s;
''', [f'{INSERT_PREFIX}%'])
cur.execute('''
DELETE FROM
"_django_movie" as M
WHERE
M.image LIKE %s;
''', [f'{INSERT_PREFIX}%'])
cur.execute('''
DELETE FROM
"_django_person" as P
WHERE
P.image LIKE %s;
''', [f'{INSERT_PREFIX}%'])
def cleanup(ctx, conn, queryname):
if queryname in {'update_movie', 'insert_user', 'insert_movie',
'insert_movie_plus'}:
# The clean up is the same as setup for mutation benchmarks
setup(ctx, conn, queryname)
| 29.539535 | 72 | 0.577074 |
| 7d6bdc79bbc74a91b96d7e15f9241a2229264de0 | 3,338 | py | Python | ploter.py | beneduzi/PlotNC | 738adf4ba3bf7adc2dab8446aee532968d1c0c05 | ["Apache-2.0"] | 1 | 2018-08-09T16:23:35.000Z | 2018-08-09T16:23:35.000Z | ploter.py | beneduzi/PlotNC | 738adf4ba3bf7adc2dab8446aee532968d1c0c05 | ["Apache-2.0"] | null | null | null | ploter.py | beneduzi/PlotNC | 738adf4ba3bf7adc2dab8446aee532968d1c0c05 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
from netCDF4 import Dataset
import argparse
# import numpy as np
import yaml
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
def gen_map_plot(right_c, left_c, file_name, data, scat_lat=0, scat_lon=0):
"""
Effectively creates the plot,
lat [-90, 90]
lon [-180, 180] or [0, 360]
"""
# try:
fig1 = plt.figure(file_name,figsize=(16, 9))
map1 = Basemap(projection='cyl', llcrnrlat=float(left_c[0]), urcrnrlat=float(right_c[0]), llcrnrlon=float(left_c[1]), urcrnrlon=float(right_c[1]), resolution='h')
map1.drawcoastlines()
map1.drawcountries(linewidth=0.5, linestyle='solid', color='k', antialiased=1, ax=None, zorder=None)
map1.fillcontinents(color='lightgray', zorder=0)
try:
ny = data.shape[1]
nx = data.shape[2]
except:
ny = data.shape[0]
nx = data.shape[1]
lons, lats = map1.makegrid(nx, ny)
x, y = map1(lons, lats)
try:
map1.contourf(x, y, data[0,:,:])
except:
map1.contourf(x, y, data[:,:])
map1.colorbar(location='right', size='5%', pad='2%')
map1.scatter(scat_lon, scat_lat, s=30, marker='o', latlon=True, linewidth=0, )
plt.savefig(file_name, dpi=300, pad_inches=0)
return True
# except:
# return False
parser = argparse.ArgumentParser(description="This script generates plots from NetCDF4")
parser.add_argument('-i', help="Input NetCDF4 file", action='store',required=True, dest='nc_path')
parser.add_argument('-o', help="Output destination", action='store',required=False, dest='out_path')
parser.add_argument('-v', help="Variables list", action='store',required=False, dest='nc_vars')
parser.add_argument('-l', help="Lat0, Lon0, Lat1, Lon1", action='store',required=False, dest='lat_lon', type=str)
parser.add_argument('-s', help="Scatter points yml file", action='store',required=False, dest='scat_vars')
args=parser.parse_args()
nc_path = args.nc_path
nc_vars = [args.nc_vars] if args.nc_vars else ['T2', 'RAINNC', 'Q2', 'PSFC']
out_path = args.out_path or './'
scat_file = args.scat_vars or 'Null'
try:
lat_lon =[float(item) for item in args.lat_lon.split(',')]
left_c = [lat_lon[0], lat_lon[1]]
right_c = [lat_lon[2], lat_lon[3]]
except:
left_c = [-89.9, -179.9]
right_c = [89.9, 179.9]
if scat_file != 'Null':
with open(scat_file,'r') as yf:
scat_yml = yaml.safe_load(yf)
scat_lat = scat_yml['lat']
scat_lon = scat_yml['lon']
else:
scat_lat = 0
scat_lon = 0
nc_file = Dataset(nc_path, 'r')
for var in nc_vars:
data = nc_file.variables[var]
for i in range(0, len(data)):
file_name = out_path + var + str(i) + '.png'
try:
suc = gen_map_plot(right_c, left_c, file_name, data[i,:,:], scat_lat, scat_lon)
brkr = False
except:
suc = gen_map_plot(right_c, left_c, file_name, data[:,:], scat_lat, scat_lon)
brkr = True
if suc == True:
s = 'plot %s, timestep %s done!' %(var, i)
print(s)
else:
s = 'Fail to plot %s, timestep %s' %(var, i)
print(s)
if brkr == True:
break
nc_file.close()
| 33.717172 | 170 | 0.597963 |
| 208ff1fa58fffd4c60294d8bcac0318e20b1c51d | 79 | py | Python | Chapter 11/ch11_1_34.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | ["MIT"] | null | null | null | Chapter 11/ch11_1_34.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | ["MIT"] | null | null | null | Chapter 11/ch11_1_34.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | ["MIT"] | null | null | null |
t1=tuple((1,"Gold",2,"Silver"))
print(1 in t1)
# True
# please note (())
| 13.166667 | 32 | 0.544304 |
| bc05ae43b5bb08be9b25ffc82fffa7c2d4cc3223 | 785 | py | Python | tests/__init__.py | magfest/residue | d8621d8a9c43c19b67f0ee532655aaeaaae4ca62 | ["BSD-3-Clause"] | 1 | 2018-02-26T19:03:19.000Z | 2018-02-26T19:03:19.000Z | tests/__init__.py | magfest/residue | d8621d8a9c43c19b67f0ee532655aaeaaae4ca62 | ["BSD-3-Clause"] | null | null | null | tests/__init__.py | magfest/residue | d8621d8a9c43c19b67f0ee532655aaeaaae4ca62 | ["BSD-3-Clause"] | 2 | 2018-02-05T19:49:30.000Z | 2018-02-24T18:10:30.000Z |
import sqlalchemy
from sqlalchemy import event
from sqlalchemy.orm import sessionmaker
def patch_session(Session, request):
orig_engine, orig_factory = Session.engine, Session.session_factory
request.addfinalizer(lambda: setattr(Session, 'engine', orig_engine))
request.addfinalizer(lambda: setattr(Session, 'session_factory', orig_factory))
db_path = '/tmp/residue.db'
Session.engine = sqlalchemy.create_engine('sqlite+pysqlite:///' + db_path)
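# SQLite leaves foreign key enforcement off by default; enable it for every new connection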
event.listen(Session.engine, 'connect', lambda conn, record: conn.execute('pragma foreign_keys=ON'))
Session.session_factory = sessionmaker(bind=Session.engine, autoflush=False, autocommit=False,
query_cls=Session.QuerySubclass)
Session.initialize_db(drop=True)
| 46.176471 | 104 | 0.735032 |
| 8ca48a5e7514f8895b073476d0a8e17ff4f87c9c | 2,923 | py | Python | micropython/main.py | fabrizziop/SmartCurrent | d53bb89348e7aa22d4d6351291d6b7339bf542c6 | ["MIT"] | null | null | null | micropython/main.py | fabrizziop/SmartCurrent | d53bb89348e7aa22d4d6351291d6b7339bf542c6 | ["MIT"] | null | null | null | micropython/main.py | fabrizziop/SmartCurrent | d53bb89348e7aa22d4d6351291d6b7339bf542c6 | ["MIT"] | null | null | null |
import gc
import time
import usocket
from machine import Pin, ADC
from utime import sleep, ticks_ms
from nodemcu_gpio_lcd import GpioLcd
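# Calibration factor used by convert_to_real_rms() to turn RMS ADC counts into amperes; LIST_MAX_LENGTH is the size of the rolling buffer of readings sent to the server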
CALIB_CONST_MUL = 0.08076
LIST_MAX_LENGTH = 60
IP_RECEIVER = "YOUR_SERVER_IP"
PORT_RECEIVER = 8000
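# getaddrinfo() returns a list of address tuples; [0][-1] picks the (ip, port) sockaddr expected by socket.connect()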
IOT_RECEIVER = usocket.getaddrinfo(IP_RECEIVER, PORT_RECEIVER)[0][-1]
PROGRAM_NAME = "SMARTCURRENTv0.1"
WARMUP_TIME = 60
current_warmup = 0
pin_ctrl = Pin(15, Pin.OUT, value=0)
pin_adc = ADC(0)
lcd = GpioLcd(rs_pin=Pin(16),enable_pin=Pin(5),d4_pin=Pin(4),d5_pin=Pin(0),d6_pin=Pin(2),d7_pin=Pin(14),num_lines=2, num_columns=20)
lcd.putstr(PROGRAM_NAME+"\n"+" DO NOT TOUCH ")
time.sleep(10)
lcd.clear()
while current_warmup < WARMUP_TIME:
lcd.putstr("TIME REMAINING:\n "+str(WARMUP_TIME-current_warmup))
current_warmup += 1
time.sleep(1)
lcd.clear()
lcd.putstr(IP_RECEIVER + "\nPORT:" + str(PORT_RECEIVER))
time.sleep(15)
lcd.clear()
def clear_and_print(string_to_print):
lcd.clear()
lcd.putstr(string_to_print)
def obtain_zero():
clear_and_print(" OBTAINING\n ZEROS")
pin_ctrl.value(1)
time.sleep(0.25)
a = 0
for i in range(50):
time.sleep(0.02)
a += pin_adc.read()
pin_ctrl.value(0)
time.sleep(0.25)
return a//50
def obtain_raw_rms(zero):
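# Sample the ADC for roughly one second and return the RMS deviation from the zero offset (minus a small noise floor, clamped at 0)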
ct = time.ticks_ms()
rms_sum = rms_amount = 0
while time.ticks_ms() < (ct+1000):
rms_sum += (zero - pin_adc.read()) ** 2
rms_amount += 1
calculated_rms = (rms_sum / rms_amount)**0.5
return max(calculated_rms-2,0)
def convert_to_real_rms(calculated_rms):
return calculated_rms * CALIB_CONST_MUL
def print_current(real_rms, zero, extra):
lcd.clear()
lcd.putstr(str(real_rms)[:5] + " A; Z: "+ str(zero)+ "\n"+extra)
def convert_to_bytearray_4socket(nonce, zero, val_list):
tb = bytearray()
tb.append(nonce[0])
tb.append(nonce[1])
tb.append(zero)
for i in range(0, LIST_MAX_LENGTH):
tb.append(val_list[i])
return tb
def send_socket(bytearray_to_send):
s = usocket.socket()
s.settimeout(0.5)
try:
s.connect(IOT_RECEIVER)
sdat = s.send(bytearray_to_send)
s.close()
return " DATA SEND OK "
except:
return " DATA SEND FAIL "
def main_loop():
needs_calibration = True
list_nonce = [0,0]
actual_list = []
for i in range(0, LIST_MAX_LENGTH):
actual_list.append(0)
while True:
if needs_calibration:
zero = obtain_zero()
needs_calibration = False
a = obtain_raw_rms(zero)
b = convert_to_real_rms(a)
actual_list.append(int(min(a//2,255)))
actual_list.pop(0)
list_nonce[0] += 1
if list_nonce[0] == 255:
list_nonce[0] = 0
list_nonce[1] += 1
if list_nonce[1] == 255:
list_nonce = [0,0]
needs_calibration = True
for i in range(0,10):
time.sleep(0.05)
#print(a)
#print(actual_list)
#print(list_nonce)
#print(convert_to_bytearray_4socket(list_nonce, actual_list))
status_to_print = send_socket(convert_to_bytearray_4socket(list_nonce, int(min(zero//4,255)), actual_list))
print_current(b, zero, status_to_print)
gc.collect()
main_loop()
| 25.867257 | 132 | 0.716387 |
| 618480b11ce37f579f09e445fac82f4fe889c6d1 | 27,997 | py | Python | bitfinex/websockets/client.py | aitorSTL/bitfinex | b34ba109500d33ffebe0ff488f93435ecb49e758 | ["MIT"] | 5 | 2020-12-09T22:22:08.000Z | 2020-12-13T17:18:28.000Z | bitfinex/websockets/client.py | aitorSTL/bitfinex | b34ba109500d33ffebe0ff488f93435ecb49e758 | ["MIT"] | null | null | null | bitfinex/websockets/client.py | aitorSTL/bitfinex | b34ba109500d33ffebe0ff488f93435ecb49e758 | ["MIT"] | null | null | null |
# coding=utf-8
import threading
import json
import hmac
import hashlib
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
from twisted.internet import reactor, ssl
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.internet.error import ReactorAlreadyRunning
from bitfinex import utils
from . import abbreviations
# Example used to make send logic
# https://stackoverflow.com/questions/18899515/writing-an-interactive-client-with-twisted-autobahn-websockets
class BitfinexClientProtocol(WebSocketClientProtocol):
def __init__(self, factory, payload=None):
super().__init__()
self.factory = factory
self.payload = payload
def onOpen(self):
self.factory.protocol_instance = self
def onConnect(self, response):
if self.payload:
self.sendMessage(self.payload, isBinary=False)
# reset the delay after reconnecting
self.factory.resetDelay()
def onMessage(self, payload, isBinary):
if not isBinary:
try:
payload_obj = json.loads(payload.decode('utf8'))
except ValueError:
pass
else:
self.factory.callback(payload_obj)
class BitfinexReconnectingClientFactory(ReconnectingClientFactory):
# set initial delay to a short time
initialDelay = 0.1
maxDelay = 20
maxRetries = 30
class BitfinexClientFactory(WebSocketClientFactory, BitfinexReconnectingClientFactory):
def __init__(self, *args, payload=None, **kwargs):
WebSocketClientFactory.__init__(self, *args, **kwargs)
self.protocol_instance = None
self.base_client = None
self.payload = payload
protocol = BitfinexClientProtocol
_reconnect_error_payload = {
'e': 'error',
'm': 'Max reconnect retries reached'
}
def clientConnectionFailed(self, connector, reason):
self.retry(connector)
if self.retries > self.maxRetries:
self.callback(self._reconnect_error_payload)
def clientConnectionLost(self, connector, reason):
self.retry(connector)
if self.retries > self.maxRetries:
self.callback(self._reconnect_error_payload)
def buildProtocol(self, addr):
return BitfinexClientProtocol(self, payload=self.payload)
class BitfinexSocketManager(threading.Thread):
STREAM_URL = 'wss://api.bitfinex.com/ws/2'
def __init__(self): # client
"""Initialise the BitfinexSocketManager"""
threading.Thread.__init__(self)
self.factories = {}
self._connected_event = threading.Event()
self._conns = {}
self._user_timer = None
self._user_listen_key = None
self._user_callback = None
def _start_socket(self, id_, payload, callback):
if id_ in self._conns:
return False
factory_url = self.STREAM_URL
factory = BitfinexClientFactory(factory_url, payload=payload)
factory.base_client = self
factory.protocol = BitfinexClientProtocol
factory.callback = callback
factory.reconnect = True
self.factories[id_] = factory
reactor.callFromThread(self.add_connection, id_)
def add_connection(self, id_):
"""
Convenience function to connect and store the resulting
connector.
"""
factory = self.factories[id_]
context_factory = ssl.ClientContextFactory()
self._conns[id_] = connectWS(factory, context_factory)
def stop_socket(self, conn_key):
"""Stop a websocket given the connection key
Parameters
----------
conn_key : str
Socket connection key
Returns
-------
str, bool
connection key string if successful, False otherwise
"""
if conn_key not in self._conns:
return
# disable reconnecting if we are closing
self._conns[conn_key].factory = WebSocketClientFactory(self.STREAM_URL)
self._conns[conn_key].disconnect()
del self._conns[conn_key]
def run(self):
try:
reactor.run(installSignalHandlers=False)
except ReactorAlreadyRunning:
# Ignore error about reactor already running
pass
def close(self):
"""Close all connections
"""
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {}
class WssClient(BitfinexSocketManager):
"""Websocket client for bitfinex.
Parameters
----------
key : str
Your API key.
secret : str
Your API secret
.. Hint::
Do not store your key or secret directly in the code.
Use environment variables and fetch them with
``os.environ.get("BITFINEX_KEY")``
"""
###########################################################################
# Bitfinex commands
###########################################################################
def __init__(self, key=None, secret=None, nonce_multiplier=1.0): # client
super().__init__()
self.key = key
self.secret = secret
self.nonce_multiplier = nonce_multiplier
def stop(self):
"""Tries to close all connections and finally stops the reactor.
Properly stops the program."""
try:
self.close()
finally:
reactor.stop()
def _nonce(self):
"""Returns a nonce used in authentication.
Nonce must be an increasing number. If the API key has been used
earlier, or by other frameworks that have used higher numbers, you might
need to increase the nonce_multiplier."""
return str(utils.get_nonce(self.nonce_multiplier))
def authenticate(self, callback, filters=None):
"""Method used to create an authenticated channel that both recieves
account spesific messages and is used to send account spesific messages.
So in order to be able to use the new_order method, you have to
create a authenticated channel before starting the connection.
Parameters
----------
callback : func
A function to use to handle incomming messages. This channel wil
be handling all messages returned from operations like new_order or
cancel_order, so make sure you handle all these messages.
filters : List[str]
A list of filter strings. See more information here:
https://docs.bitfinex.com/v2/docs/ws-auth#section-channel-filters
Example
-------
::
def handle_account_messages(message):
print(message)
# You should only need to create and authenticate a client once.
# Then simply reuse it later
my_client = WssClient(key, secret)
my_client.authenticate(
callback=handle_account_messages
)
my_client.start()
"""
nonce = self._nonce()
auth_payload = 'AUTH{}'.format(nonce)
signature = hmac.new(
self.secret.encode(), # settings.API_SECRET.encode()
msg=auth_payload.encode('utf8'),
digestmod=hashlib.sha384
).hexdigest()
data = {
# docs: http://bit.ly/2CEx9bM
'event': 'auth',
'apiKey': self.key,
'authSig': signature,
'authPayload': auth_payload,
'authNonce': nonce,
'calc': 1
}
if filters:
data['filter'] = filters
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
return self._start_socket("auth", payload, callback)
def subscribe_to_ticker(self, symbol, callback):
"""Subscribe to the passed symbol ticks data channel.
Parameters
----------
symbol : str
Symbol to request data for.
callback : func
A function to use to handle incoming messages
Example
-------
::
def my_handler(message):
# Here you can do stuff with the messages
print(message)
# You should only need to create and authenticate a client once.
# Then simply reuse it later
my_client = WssClient(key, secret)
my_client.authenticate(print)
my_client.subscribe_to_ticker(
symbol="BTCUSD",
callback=my_handler
)
my_client.start()
"""
symbol = utils.order_symbol(symbol)
id_ = "_".join(["ticker", symbol])
data = {
'event': 'subscribe',
'channel': 'ticker',
'symbol': symbol,
}
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
return self._start_socket(id_, payload, callback)
def subscribe_to_trades(self, symbol, callback):
"""Subscribe to the passed symbol trades data channel.
Parameters
----------
symbol : str
Symbol to request data for.
callback : func
A function to use to handle incoming messages
Example
-------
::
def my_handler(message):
# Here you can do stuff with the messages
print(message)
# You should only need to create and authenticate a client once.
# Then simply reuse it later
my_client = WssClient(key, secret)
my_client.authenticate(print)
my_client.subscribe_to_trades(
symbol="BTCUSD",
callback=my_handler
)
my_client.start()
"""
symbol = utils.order_symbol(symbol)
id_ = "_".join(["trades", symbol])
data = {
'event': 'subscribe',
'channel': 'trades',
'symbol': symbol,
}
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
return self._start_socket(id_, payload, callback)
# Precision: R0, P0, P1, P2, P3
# Length: 1,25,100
def subscribe_to_orderbook(self, symbol, precision, length, callback):
"""Subscribe to the orderbook of a given symbol.
Parameters
----------
symbol : str
Symbol to request data for.
precision : str
Accepted values as strings {R0, P0, P1, P2, P3}
length : int
Initial snapshot length. Accepted values {1,25,100}
callback : func
A function to use to handle incoming messages
Example
-------
::
def my_handler(message):
# Here you can do stuff with the messages
print(message)
# You should only need to create and authenticate a client once.
# Then simply reuse it later
my_client = WssClient(key, secret)
my_client.subscribe_to_orderbook(
symbol="BTCUSD",
precision="P1",
length=25,
callback=my_handler
)
my_client.start()
"""
symbol = utils.order_symbol(symbol)
id_ = "_".join(["order", symbol])
data = {
'event': 'subscribe',
"channel": "book",
"prec": precision,
"len": length,
'symbol': symbol,
}
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
return self._start_socket(id_, payload, callback)
def subscribe_to_candles(self, symbol, timeframe, callback):
"""Subscribe to the passed symbol's OHLC data channel.
Parameters
----------
symbol : str
Symbol to request data for
timeframe : str
Accepted values as strings {1m, 5m, 15m, 30m, 1h, 3h, 6h, 12h,
1D, 7D, 14D, 1M}
callback : func
A function to use to handle incoming messages
Returns
-------
str
The socket identifier.
Example
-------
::
def my_candle_handler(message):
# Here you can do stuff with the candle bar messages
print(message)
# You should only need to create and authenticate a client once.
# Then simply reuse it later
my_client = WssClient(key, secret)
my_client.subscribe_to_candles(
symbol="BTCUSD",
timeframe="1m",
callback=my_candle_handler
)
my_client.subscribe_to_candles(
symbol="ETHUSD",
timeframe="5m",
callback=my_candle_handler
)
my_client.start()
"""
valid_tfs = ['1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D',
'7D', '14D', '1M']
if timeframe:
if timeframe not in valid_tfs:
raise ValueError("timeframe must be any of %s" % valid_tfs)
else:
timeframe = '1m'
identifier = ('candles', symbol, timeframe)
id_ = "_".join(identifier)
symbol = utils.order_symbol(symbol)
key = 'trade:' + timeframe + ':' + symbol
data = {
'event': 'subscribe',
'channel': 'candles',
'key': key,
}
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
return self._start_socket(id_, payload, callback)
def ping(self, channel="auth"):
"""Ping bitfinex.
Parameters
----------
channel : str
What channel id that should be pinged. Default "auth".
To get channel ids, use ``client._conns.keys()``.
"""
client_cid = utils.create_cid()
data = {
'event': 'ping',
'cid': client_cid
}
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
self.factories[channel].protocol_instance.sendMessage(payload, isBinary=False)
return client_cid
def new_order_op(self, order_type, symbol, amount, price, price_trailing=None,
price_aux_limit=None, price_oco_stop=None, hidden=0,
flags=None, tif=None, set_cid=True):
"""Create new order operation
Parameters
----------
order_type : str
Order type. Must be one of: "LIMIT", "STOP", "MARKET",
"TRAILING STOP", "FOK", "STOP LIMIT" or equivalent with "EXCHANGE"
prepended to it. All orders starting with EXCHANGE are made on the
exchange wallet. Orders without it are made on the margin wallet and
will start or change a position.
symbol : str
The currency symbol to be traded. e.g. BTCUSD
amount : decimal str
The amount to be traded.
price : decimal str
The price to buy at. Will be ignored for market orders.
price_trailing : decimal string
The trailing price
price_aux_limit : decimal string
Auxiliary Limit price (for STOP LIMIT)
price_oco_stop : decimal string
OCO stop price
hidden : bool
Whether or not to use the hidden order type.
flags : list
A list of integers for the different flags. Will be added together
into a unique integer.
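For example, flags=[64, 512] is sent as flags=576.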
tif : datetime string
set_cid : bool
whether or not to set a cid.
Returns
-------
dict
A dict containing the order details. Used in new_order and for
creating multiorders.
Example
-------
Note if you only want to create a new order, use the ``new_order``
method below. However, if you want to submit multiple orders and
cancel orders at the same time, use this method to create order
operations and send them with the ``multi_order`` method::
# You should only need to create and authenticate a client once.
# Then simply reuse it later
my_client = WssClient(key, secret)
my_client.authenticate()
my_client.start()
order_operation = my_client.new_order_op(
order_type="LIMIT",
symbol="BTCUSD",
amount=0.004,
price=1000.0
)
# Useful to keep track of an order by its client id, for later
# operations (like cancel order).
client_id = order_operation["cid"]
my_client.multi_order(
operations=[order_operation]
)
"""
flags = flags or []
order_op = {
'type': order_type,
'symbol': utils.order_symbol(symbol),
'amount': amount,
'price': price,
'hidden': hidden,
'flags': sum(flags),
'meta': { 'aff_code': 'b2UR2iQr' }
}
if price_trailing:
order_op['price_trailing'] = price_trailing
if price_aux_limit:
order_op['price_aux_limit'] = price_aux_limit
if price_oco_stop:
order_op['price_oco_stop'] = price_oco_stop
if tif:
order_op['tif'] = tif
if set_cid:
client_order_id = utils.create_cid()
order_op['cid'] = client_order_id
order_op = [
abbreviations.get_notification_code('order new'),
order_op
]
return order_op
def new_order(self, order_type, symbol, amount, price, price_trailing=None,
price_aux_limit=None, price_oco_stop=None, hidden=0,
flags=None, tif=None, set_cid=True):
"""
Create new order.
Parameters
----------
order_type : str
Order type. Must be one of: "LIMIT", "STOP", "MARKET",
"TRAILING STOP", "FOK", "STOP LIMIT" or equivalent with "EXCHANGE"
prepended to it. All orders starting with EXCHANGE are made on the
exchange wallet. Orders without it are made on the margin wallet and
will start or change a position.
symbol : str
The currency symbol to be traded. e.g. BTCUSD
amount : decimal string
The amount to be traded.
price : decimal string
The price to buy at. Will be ignored for market orders.
price_trailing : decimal string
The trailing price
price_aux_limit : decimal string
Auxiliary Limit price (for STOP LIMIT)
price_oco_stop : decimal string
OCO stop price
hidden : bool
Whether or not to use the hidden order type.
flags : list
A list of integers for the different flags. Will be added together
into a unique integer.
tif : datetime string
set_cid : bool
whether or not to set a cid.
Returns
-------
int
Order client id (cid). The CID is also a mts date stamp of when the
order was created.
Example
-------
::
# You should only need to create and authenticate a client once.
# Then simply reuse it later
my_client = WssClient(key, secret)
my_client.authenticate()
my_client.start()
order_client_id = my_client.new_order(
order_type="LIMIT",
symbol="BTCUSD",
amount=0.004,
price=1000.0
)
"""
operation = self.new_order_op(
order_type=order_type,
symbol=symbol,
amount=amount,
price=price,
price_trailing=price_trailing,
price_aux_limit=price_aux_limit,
price_oco_stop=price_oco_stop,
hidden=hidden,
flags=flags,
tif=tif,
set_cid=set_cid
)
data = [0, operation[0], None, operation[1]]
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
self.factories["auth"].protocol_instance.sendMessage(payload, isBinary=False)
if set_cid is True:
return operation[1]["cid"]
else:
return None
def multi_order(self, operations):
"""Multi order operation.
Parameters
----------
operations : list
a list of operations. Read more here:
https://bitfinex.readme.io/v2/reference#ws-input-order-multi-op
Hint: you can use the self.new_order_op() for easy new order
operation creation.
Returns
-------
list
A list of all the client ids created for each order. Returned in
the order they are given to the method.
Example
-------
::
# You should only need to create and authenticate a client once.
# Then simply reuse it later
from bitfinex import utils
my_client = WssClient(key, secret)
my_client.authenticate()
my_client.start()
example_order_cid_to_cancel = 153925861909296
# docs: http://bit.ly/2BVqwW6
cancel_order_operation = {
'cid': example_order_cid_to_cancel,
'cid_date': utils.cid_to_date(example_order_cid_to_cancel)
}
new_order_operation = my_client.new_order_op(
order_type="LIMIT",
symbol="BTCUSD",
amount="0.004",
price="1000.0"
)
order_client_id = my_client.multi_order([
cancel_order_operation,
new_order_operation
])
"""
data = [
0,
abbreviations.get_notification_code('order multi-op'),
None,
operations
]
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
self.factories["auth"].protocol_instance.sendMessage(payload, isBinary=False)
return [order[1].get("cid", None) for order in operations]
def cancel_order(self, order_id):
"""Cancel order
Parameters
----------
order_id : int, str
Order id created by Bitfinex
Example
-------
::
# You should only need to create and authenticate a client once.
# Then simply reuse it later
my_client = WssClient(key, secret)
my_client.authenticate()
my_client.start()
my_client.cancel_order(
order_id=1234
)
"""
data = [
0,
abbreviations.get_notification_code('order cancel'),
None,
{
# docs: http://bit.ly/2BVqwW6
'id': order_id
}
]
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
self.factories["auth"].protocol_instance.sendMessage(payload, isBinary=False)
def cancel_order_cid(self, order_cid, order_date):
"""Cancel order using the client id and the date of the cid. Both are
returned from the new_order command from this library.
Parameters
----------
order_cid : str
cid string. e.g. "1234154"
order_date : str
Iso formated order date. e.g. "2012-01-23"
Example
-------
::
# You should only need to create and authenticate a client once.
# Then simply reuse it later
my_client = WssClient(key, secret)
my_client.authenticate()
my_client.start()
# order_cid created by this library is always a milliseconds
# time stamp. So you can just divide it by 1000 to get the timestamp.
my_client.cancel_order(
order_cid=1538911910035,
order_date=(
datetime.utcfromtimestamp(
1538911910035/1000.0
).strftime("%Y-%m-%d")
)
)
"""
data = [
0,
abbreviations.get_notification_code('order cancel'),
None,
{
# docs: http://bit.ly/2BVqwW6
'cid': order_cid,
'cid_date': order_date
}
]
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
self.factories["auth"].protocol_instance.sendMessage(payload, isBinary=False)
def update_order(self, **order_settings):
"""Update order using the order id
Parameters
----------
id : int64
Order ID
gid : int32
Group Order ID
price : decimal string
Price
amount : decimal string
Amount
delta : decimal string
Change of amount
price_aux_limit : decimal string
Auxiliary limit price
price_trailing : decimal string
Trailing price delta
tif : datetime string
Time-In-Force: datetime for automatic order cancellation (ie. 2020-01-01 10:45:23)
"""
data = [
0,
abbreviations.get_notification_code('order update'),
None,
order_settings
]
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
self.factories["auth"].protocol_instance.sendMessage(payload, isBinary=False)
def calc(self, *calculations):
"""
This message will be used by clients to trigger specific calculations,
so we don't end up calculating data that is not usually needed.
You can request calculations to the websocket server that sends you the
same message, with the required fields.
List items must be one of the following:
- margin_sym_SYMBOL (e.g. margin_sym_tBTCUSD)
- funding_sym_SYMBOL
- position_SYMBOL
- wallet_WALLET-TYPE_CURRENCY
Parameters
----------
*calculations : list
list of calculations wanted
Returns
-------
None
Data is returned over the auth channel. See the abbreviation
glossary: https://docs.bitfinex.com/v2/docs/abbreviations-glossary
Examples
--------
::
# You should only need to create and authenticate a client once.
# Then simply reuse it later
my_client = WssClient(key, secret)
my_client.authenticate(print)
my_client.start()
my_client.calc(["margin_sym_tBTCUSD", "funding_sym_fUSD"])
my_client.calc(["margin_sym_tBTCUSD"])
my_client.calc(["position_tBTCUSD"])
my_client.calc(["wallet_exachange_USD"])
.. Note::
Calculations are on demand, so no more streaming of unnecessary data.
Websocket server allows up to 30 calculations per batch.
If the client sends too many concurrent requests (or tries to spam) requests,
it will receive an error and potentially a disconnection.
The Websocket server performs a maximum of 8 calculations per second per client.
"""
data = [
0,
'calc',
None,
calculations
]
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
self.factories["auth"].protocol_instance.sendMessage(payload, isBinary=False)
| 30.79978 | 109 | 0.559739 |
| 449c5d92a23196d1c889009d4a75e14ad286d965 | 33,233 | py | Python | venv/Lib/site-packages/scipy/interpolate/_cubic.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | ["MIT"] | 6 | 2019-12-21T21:15:54.000Z | 2021-04-20T17:35:24.000Z | venv/Lib/site-packages/scipy/interpolate/_cubic.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | ["MIT"] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | venv/Lib/site-packages/scipy/interpolate/_cubic.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | ["MIT"] | 3 | 2021-01-31T16:40:52.000Z | 2021-08-29T18:32:34.000Z |
"""Interpolation algorithms using piecewise cubic polynomials."""
import numpy as np
from . import PPoly
from .polyint import _isscalar
from scipy.linalg import solve_banded, solve
__all__ = ["CubicHermiteSpline", "PchipInterpolator", "pchip_interpolate",
"Akima1DInterpolator", "CubicSpline"]
def prepare_input(x, y, axis, dydx=None):
"""Prepare input for cubic spline interpolators.
All data are converted to numpy arrays and checked for correctness.
Axes equal to `axis` of arrays `y` and `dydx` are rolled to be the 0th
axis. The value of `axis` is converted to lie in
[0, number of dimensions of `y`).
"""
x, y = map(np.asarray, (x, y))
if np.issubdtype(x.dtype, np.complexfloating):
raise ValueError("`x` must contain real values.")
x = x.astype(float)
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
if dydx is not None:
dydx = np.asarray(dydx)
if y.shape != dydx.shape:
raise ValueError("The shapes of `y` and `dydx` must be identical.")
if np.issubdtype(dydx.dtype, np.complexfloating):
dtype = complex
dydx = dydx.astype(dtype, copy=False)
y = y.astype(dtype, copy=False)
axis = axis % y.ndim
if x.ndim != 1:
raise ValueError("`x` must be 1-dimensional.")
if x.shape[0] < 2:
raise ValueError("`x` must contain at least 2 elements.")
if x.shape[0] != y.shape[axis]:
raise ValueError("The length of `y` along `axis`={0} doesn't "
"match the length of `x`".format(axis))
if not np.all(np.isfinite(x)):
raise ValueError("`x` must contain only finite values.")
if not np.all(np.isfinite(y)):
raise ValueError("`y` must contain only finite values.")
if dydx is not None and not np.all(np.isfinite(dydx)):
raise ValueError("`dydx` must contain only finite values.")
dx = np.diff(x)
if np.any(dx <= 0):
raise ValueError("`x` must be strictly increasing sequence.")
y = np.rollaxis(y, axis)
if dydx is not None:
dydx = np.rollaxis(dydx, axis)
return x, dx, y, axis, dydx
class CubicHermiteSpline(PPoly):
"""Piecewise-cubic interpolator matching values and first derivatives.
The result is represented as a `PPoly` instance.
Parameters
----------
x : array_like, shape (n,)
1-D array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along ``axis``
(see below) must match the length of ``x``. Values must be finite.
dydx : array_like
Array containing derivatives of the dependent variable. It can have
arbitrary number of dimensions, but the length along ``axis``
(see below) must match the length of ``x``. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), it is set to True.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same ``x`` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding ``axis``.
For example, if `y` is 1-D, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same axis which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
See Also
--------
Akima1DInterpolator : Akima 1D interpolator.
PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
CubicSpline : Cubic spline data interpolator.
PPoly : Piecewise polynomial in terms of coefficients and breakpoints
Notes
-----
If you want to create a higher-order spline matching higher-order
derivatives, use `BPoly.from_derivatives`.
References
----------
.. [1] `Cubic Hermite spline
<https://en.wikipedia.org/wiki/Cubic_Hermite_spline>`_
on Wikipedia.
"""
def __init__(self, x, y, dydx, axis=0, extrapolate=None):
if extrapolate is None:
extrapolate = True
x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx)
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
t = (dydx[:-1] + dydx[1:] - 2 * slope) / dxr
c = np.empty((4, len(x) - 1) + y.shape[1:], dtype=t.dtype)
c[0] = t / dxr
c[1] = (slope - dydx[:-1]) / dxr - t
c[2] = dydx[:-1]
c[3] = y[:-1]
super(CubicHermiteSpline, self).__init__(c, x, extrapolate=extrapolate)
self.axis = axis
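# Illustrative sketch (hypothetical helper, not part of the original scipy
# module): a CubicHermiteSpline built from hand-picked values and slopes
# reproduces both the data values and the prescribed first derivatives at the
# breakpoints, which is exactly the property the coefficient construction
# above guarantees.
def _demo_cubic_hermite_spline():
    x = np.array([0.0, 1.0, 3.0])
    y = np.array([0.0, 1.0, 0.0])
    dydx = np.array([1.0, 0.0, -1.0])
    spline = CubicHermiteSpline(x, y, dydx)
    assert np.allclose(spline(x), y)        # matches the data values
    assert np.allclose(spline(x, 1), dydx)  # matches the prescribed slopes
    return spline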
class PchipInterpolator(CubicHermiteSpline):
r"""PCHIP 1-D monotonic cubic interpolation.
``x`` and ``y`` are arrays of values used to approximate some function f,
with ``y = f(x)``. The interpolant uses monotonic cubic splines
to find the value of new points. (PCHIP stands for Piecewise Cubic
Hermite Interpolating Polynomial).
Parameters
----------
x : ndarray
A 1-D array of monotonically increasing real values. ``x`` cannot
include duplicate values (otherwise f is overspecified)
y : ndarray
A 1-D array of real values. ``y``'s length along the interpolation
axis must be equal to the length of ``x``. If N-D array, use ``axis``
parameter to select correct axis.
axis : int, optional
Axis in the y array corresponding to the x-coordinate values.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
CubicHermiteSpline : Piecewise-cubic interpolator.
Akima1DInterpolator : Akima 1D interpolator.
CubicSpline : Cubic spline data interpolator.
PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
Notes
-----
The interpolator preserves monotonicity in the interpolation data and does
not overshoot if the data is not smooth.
The first derivatives are guaranteed to be continuous, but the second
derivatives may jump at :math:`x_k`.
    The derivatives :math:`f'_k` at the points :math:`x_k` are determined
    using the PCHIP algorithm [1]_.
    Let :math:`h_k = x_{k+1} - x_k` be the spacing between data points, and
    :math:`d_k = (y_{k+1} - y_k) / h_k` the slope of the k-th segment.
If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
weighted harmonic mean
.. math::
\frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
The end slopes are set using a one-sided scheme [2]_.
References
----------
.. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation,
SIAM J. Numer. Anal., 17(2), 238 (1980).
:doi:`10.1137/0717021`.
.. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
:doi:`10.1137/1.9780898717952`
"""
def __init__(self, x, y, axis=0, extrapolate=None):
x, _, y, axis, _ = prepare_input(x, y, axis)
xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
dk = self._find_derivatives(xp, y)
super(PchipInterpolator, self).__init__(x, y, dk, axis=0,
extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _edge_case(h0, h1, m0, m1):
# one-sided three-point estimate for the derivative
d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
# try to preserve shape
mask = np.sign(d) != np.sign(m0)
mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
mmm = (~mask) & mask2
d[mask] = 0.
d[mmm] = 3.*m0[mmm]
return d
@staticmethod
def _find_derivatives(x, y):
        # Determine the derivatives d_k at the points x_k using the PCHIP
        # algorithm:
        # Let m_k be the slope of the kth segment (between k and k+1).
        # If m_k == 0 or m_{k-1} == 0 or sgn(m_k) != sgn(m_{k-1}), then d_k == 0;
        # else use the weighted harmonic mean:
        #   w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
        #   1/d_k = 1/(w_1 + w_2) * (w_1 / m_{k-1} + w_2 / m_k)
        # where h_k is the spacing between x_k and x_{k+1}.
y_shape = y.shape
if y.ndim == 1:
# So that _edge_case doesn't end up assigning to scalars
x = x[:, None]
y = y[:, None]
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
dk = np.zeros_like(y)
dk[0] = mk
dk[1] = mk
return dk.reshape(y_shape)
smk = np.sign(mk)
condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
# values where division by zero occurs will be excluded
# by 'condition' afterwards
with np.errstate(divide='ignore'):
whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0 / whmean[~condition]
# special case endpoints, as suggested in
# Cleve Moler, Numerical Computing with MATLAB, Chap 3.4
dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
return dk.reshape(y_shape)
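# Illustrative sketch (hypothetical helper, not part of the original scipy
# module): checks the weighted-harmonic-mean derivative formula documented in
# the PchipInterpolator notes at one interior breakpoint against what the
# interpolator actually computes.
def _demo_pchip_interior_derivative():
    x = np.array([0.0, 1.0, 3.0, 4.0])
    y = np.array([0.0, 1.0, 2.0, 4.0])
    h = np.diff(x)              # h_k = x_{k+1} - x_k
    d = np.diff(y) / h          # d_k = (y_{k+1} - y_k) / h_k
    k = 1                       # an interior breakpoint
    w1 = 2 * h[k] + h[k - 1]
    w2 = h[k] + 2 * h[k - 1]
    expected = (w1 + w2) / (w1 / d[k - 1] + w2 / d[k])
    interp = PchipInterpolator(x, y)
    assert np.isclose(interp(x[k], 1), expected)
    return expected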
def pchip_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for pchip interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
to find the value of new points x and the derivatives there.
See `scipy.interpolate.PchipInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
Derivatives to extract. The 0th derivative can be included to
return the function value.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
Returns
-------
y : scalar or array_like
        The result, of length R or length M or M by R.
Examples
--------
We can interpolate 2D observed data using pchip interpolation:
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import pchip_interpolate
>>> x_observed = np.linspace(0.0, 10.0, 11)
>>> y_observed = np.sin(x_observed)
>>> x = np.linspace(min(x_observed), max(x_observed), num=100)
>>> y = pchip_interpolate(x_observed, y_observed, x)
>>> plt.plot(x_observed, y_observed, "o", label="observation")
>>> plt.plot(x, y, label="pchip interpolation")
>>> plt.legend()
>>> plt.show()
"""
P = PchipInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(der)(x)
else:
return [P.derivative(nu)(x) for nu in der]
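# Illustrative sketch (hypothetical helper, not part of the original scipy
# module): `der` may also be a list of derivative orders, in which case
# `pchip_interpolate` returns one array per requested order.
def _demo_pchip_interpolate_der_list():
    xi = np.linspace(0.0, 10.0, 11)
    yi = np.sin(xi)
    x = np.linspace(0.0, 10.0, 5)
    slope, curvature = pchip_interpolate(xi, yi, x, der=[1, 2])
    assert slope.shape == x.shape and curvature.shape == x.shape
    return slope, curvature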
class Akima1DInterpolator(CubicHermiteSpline):
"""
Akima interpolator
Fit piecewise cubic polynomials, given vectors x and y. The interpolation
method by Akima uses a continuously differentiable sub-spline built from
piecewise cubic polynomials. The resultant curve passes through the given
data points and will appear smooth and natural.
Parameters
----------
x : ndarray, shape (m, )
1-D array of monotonically increasing real values.
y : ndarray, shape (m, ...)
N-D array of real values. The length of ``y`` along the first axis
must be equal to the length of ``x``.
axis : int, optional
Specifies the axis of ``y`` along which to interpolate. Interpolation
defaults to the first axis of ``y``.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
CubicSpline : Cubic spline data interpolator.
PPoly : Piecewise polynomial in terms of coefficients and breakpoints
Notes
-----
.. versionadded:: 0.14
    Use only for precise data, as the fitted curve passes through the given
    points exactly. This routine is useful for plotting a pleasingly smooth
    curve through a few given points.
References
----------
    .. [1] A new method of interpolation and smooth curve fitting based
           on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
           589-602.
"""
def __init__(self, x, y, axis=0):
# Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
# https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation
x, dx, y, axis, _ = prepare_input(x, y, axis)
# determine slopes between breakpoints
m = np.empty((x.size + 3, ) + y.shape[1:])
dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
m[2:-2] = np.diff(y, axis=0) / dx
# add two additional points on the left ...
m[1] = 2. * m[2] - m[3]
m[0] = 2. * m[1] - m[2]
# ... and on the right
m[-2] = 2. * m[-3] - m[-4]
m[-1] = 2. * m[-2] - m[-3]
# if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
# This is the fill value:
t = .5 * (m[3:] + m[:-3])
# get the denominator of the slope t
dm = np.abs(np.diff(m, axis=0))
f1 = dm[2:]
f2 = dm[:-2]
f12 = f1 + f2
        # This is the mask of where the slope at the breakpoint is defined:
ind = np.nonzero(f12 > 1e-9 * np.max(f12))
x_ind, y_ind = ind[0], ind[1:]
# Set the slope at breakpoint
t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
super(Akima1DInterpolator, self).__init__(x, y, t, axis=0,
extrapolate=False)
self.axis = axis
def extend(self, c, x, right=True):
raise NotImplementedError("Extending a 1-D Akima interpolator is not "
"yet implemented")
# These are inherited from PPoly, but they do not produce an Akima
# interpolator. Hence stub them out.
@classmethod
def from_spline(cls, tck, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
class CubicSpline(CubicHermiteSpline):
"""Cubic spline data interpolator.
Interpolate data with a piecewise cubic polynomial which is twice
continuously differentiable [1]_. The result is represented as a `PPoly`
instance with breakpoints matching the given data.
Parameters
----------
x : array_like, shape (n,)
1-D array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along ``axis``
(see below) must match the length of ``x``. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
        * 'periodic': The interpolated function is assumed to be periodic
of period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
        * 'clamped': The first derivative at the curve ends is zero. Assuming
          a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
        * 'natural': The second derivative at the curve ends is zero. Assuming
          a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
        tuple `(order, deriv_values)` that allows specifying arbitrary
        derivatives at the curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array_like containing derivative values, shape must
be the same as `y`, excluding ``axis`` dimension. For example, if
`y` is 1-D, then `deriv_value` must be a scalar. If `y` is 3-D with
the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2-D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), ``extrapolate`` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same ``x`` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding ``axis``.
For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same axis which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
See Also
--------
Akima1DInterpolator : Akima 1D interpolator.
PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
Notes
-----
    Parameters `bc_type` and ``extrapolate`` work independently, i.e. the
former controls only construction of a spline, and the latter only
evaluation.
When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
a condition that the first derivative is equal to the linear interpolant
slope. When both boundary conditions are 'not-a-knot' and n = 3, the
solution is sought as a parabola passing through given points.
    When 'not-a-knot' boundary conditions are applied to both ends, the
resulting spline will be the same as returned by `splrep` (with ``s=0``)
and `InterpolatedUnivariateSpline`, but these two methods use a
representation in B-spline basis.
.. versionadded:: 0.18.0
Examples
--------
In this example the cubic spline is used to interpolate a sampled sinusoid.
    You can see that the spline continuity property holds for the first and
    second derivatives and is violated only for the third derivative.
>>> from scipy.interpolate import CubicSpline
>>> import matplotlib.pyplot as plt
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> cs = CubicSpline(x, y)
>>> xs = np.arange(-0.5, 9.6, 0.1)
>>> fig, ax = plt.subplots(figsize=(6.5, 4))
>>> ax.plot(x, y, 'o', label='data')
>>> ax.plot(xs, np.sin(xs), label='true')
>>> ax.plot(xs, cs(xs), label="S")
>>> ax.plot(xs, cs(xs, 1), label="S'")
>>> ax.plot(xs, cs(xs, 2), label="S''")
>>> ax.plot(xs, cs(xs, 3), label="S'''")
>>> ax.set_xlim(-0.5, 9.5)
>>> ax.legend(loc='lower left', ncol=2)
>>> plt.show()
In the second example, the unit circle is interpolated with a spline. A
periodic boundary condition is used. You can see that the first derivative
values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
computed. Note that a circle cannot be exactly represented by a cubic
spline. To increase precision, more breakpoints would be required.
>>> theta = 2 * np.pi * np.linspace(0, 1, 5)
>>> y = np.c_[np.cos(theta), np.sin(theta)]
>>> cs = CubicSpline(theta, y, bc_type='periodic')
>>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
ds/dx=0.0 ds/dy=1.0
>>> xs = 2 * np.pi * np.linspace(0, 1, 100)
>>> fig, ax = plt.subplots(figsize=(6.5, 4))
>>> ax.plot(y[:, 0], y[:, 1], 'o', label='data')
>>> ax.plot(np.cos(xs), np.sin(xs), label='true')
>>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
>>> ax.axes.set_aspect('equal')
>>> ax.legend(loc='center')
>>> plt.show()
The third example is the interpolation of a polynomial y = x**3 on the
    interval 0 <= x <= 1. A cubic spline can represent this function exactly.
To achieve that we need to specify values and first derivatives at
endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
y'(1) = 3.
>>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
>>> x = np.linspace(0, 1)
>>> np.allclose(x**3, cs(x))
True
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
x, dx, y, axis, _ = prepare_input(x, y, axis)
n = len(x)
bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
if extrapolate is None:
if bc[0] == 'periodic':
extrapolate = 'periodic'
else:
extrapolate = True
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
# If bc is 'not-a-knot' this change is just a convention.
# If bc is 'periodic' then we already checked that y[0] == y[-1],
        # and the spline is just a constant; we handle this case in the same
# way by setting the first derivatives to slope, which is 0.
if n == 2:
if bc[0] in ['not-a-knot', 'periodic']:
bc[0] = (1, slope[0])
if bc[1] in ['not-a-knot', 'periodic']:
bc[1] = (1, slope[0])
# This is a very special case, when both conditions are 'not-a-knot'
# and n == 3. In this case 'not-a-knot' can't be handled regularly
        # as both conditions are identical. We handle this case by
# constructing a parabola passing through given points.
if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
A = np.zeros((3, 3)) # This is a standard matrix.
b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
A[0, 0] = 1
A[0, 1] = 1
A[1, 0] = dx[1]
A[1, 1] = 2 * (dx[0] + dx[1])
A[1, 2] = dx[0]
A[2, 1] = 1
A[2, 2] = 1
b[0] = 2 * slope[0]
b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
b[2] = 2 * slope[1]
s = solve(A, b, overwrite_a=True, overwrite_b=True,
check_finite=False)
elif n == 3 and bc[0] == 'periodic':
            # When the number of points is 3, we compute the derivatives
            # manually
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
t = (slope / dxr).sum() / (1. / dxr).sum()
s.fill(t)
else:
# Find derivative values at each x[i] by solving a tridiagonal
# system.
A = np.zeros((3, n)) # This is a banded matrix representation.
b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
# Filling the system for i=1..n-2
# (x[i-1] - x[i]) * s[i-1] +\
# 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
# (x[i] - x[i-1]) * s[i+1] =\
# 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
# (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
A[0, 2:] = dx[:-1] # The upper diagonal
A[-1, :-2] = dx[1:] # The lower diagonal
b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
bc_start, bc_end = bc
if bc_start == 'periodic':
# Due to the periodicity, and because y[-1] = y[0], the linear
# system has (n-1) unknowns/equations instead of n:
A = A[:, 0:-1]
A[1, 0] = 2 * (dx[-1] + dx[0])
A[0, 1] = dx[-1]
b = b[:-1]
# Also, due to the periodicity, the system is not tri-diagonal.
# We need to compute a "condensed" matrix of shape (n-2, n-2).
# See https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html
# for more explanations.
# The condensed matrix is obtained by removing the last column
# and last row of the (n-1, n-1) system matrix. The removed
# values are saved in scalar variables with the (n-1, n-1)
# system matrix indices forming their names:
a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
a_m1_m2 = dx[-1]
a_m1_m1 = 2 * (dx[-1] + dx[-2])
a_m2_m1 = dx[-3]
a_0_m1 = dx[0]
b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
Ac = A[:, :-1]
b1 = b[:-1]
b2 = np.zeros_like(b1)
b2[0] = -a_0_m1
b2[-1] = -a_m2_m1
# s1 and s2 are the solutions of (n-2, n-2) system
s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
overwrite_b=False, check_finite=False)
s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
overwrite_b=False, check_finite=False)
# computing the s[n-2] solution:
s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
(a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
# s is the solution of the (n, n) system:
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
s[:-2] = s1 + s_m1 * s2
s[-2] = s_m1
s[-1] = s[0]
else:
if bc_start == 'not-a-knot':
A[1, 0] = dx[1]
A[0, 1] = x[2] - x[0]
d = x[2] - x[0]
b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
dxr[0]**2 * slope[1]) / d
elif bc_start[0] == 1:
A[1, 0] = 1
A[0, 1] = 0
b[0] = bc_start[1]
elif bc_start[0] == 2:
A[1, 0] = 2 * dx[0]
A[0, 1] = dx[0]
b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
if bc_end == 'not-a-knot':
A[1, -1] = dx[-2]
A[-1, -2] = x[-1] - x[-3]
d = x[-1] - x[-3]
b[-1] = ((dxr[-1]**2*slope[-2] +
(2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
elif bc_end[0] == 1:
A[1, -1] = 1
A[-1, -2] = 0
b[-1] = bc_end[1]
elif bc_end[0] == 2:
A[1, -1] = 2 * dx[-1]
A[-1, -2] = dx[-1]
b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
s = solve_banded((1, 1), A, b, overwrite_ab=True,
overwrite_b=True, check_finite=False)
super(CubicSpline, self).__init__(x, y, s, axis=0,
extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _validate_bc(bc_type, y, expected_deriv_shape, axis):
"""Validate and prepare boundary conditions.
Returns
-------
validated_bc : 2-tuple
Boundary conditions for a curve start and end.
y : ndarray
            y cast to complex dtype if one of the boundary conditions has
complex dtype.
"""
if isinstance(bc_type, str):
if bc_type == 'periodic':
if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
raise ValueError(
"The first and last `y` point along axis {} must "
"be identical (within machine precision) when "
"bc_type='periodic'.".format(axis))
bc_type = (bc_type, bc_type)
else:
if len(bc_type) != 2:
raise ValueError("`bc_type` must contain 2 elements to "
"specify start and end conditions.")
if 'periodic' in bc_type:
raise ValueError("'periodic' `bc_type` is defined for both "
"curve ends and cannot be used with other "
"boundary conditions.")
validated_bc = []
for bc in bc_type:
if isinstance(bc, str):
if bc == 'clamped':
validated_bc.append((1, np.zeros(expected_deriv_shape)))
elif bc == 'natural':
validated_bc.append((2, np.zeros(expected_deriv_shape)))
elif bc in ['not-a-knot', 'periodic']:
validated_bc.append(bc)
else:
raise ValueError("bc_type={} is not allowed.".format(bc))
else:
try:
deriv_order, deriv_value = bc
except Exception as e:
raise ValueError(
"A specified derivative value must be "
"given in the form (order, value)."
) from e
if deriv_order not in [1, 2]:
raise ValueError("The specified derivative order must "
"be 1 or 2.")
deriv_value = np.asarray(deriv_value)
if deriv_value.shape != expected_deriv_shape:
raise ValueError(
"`deriv_value` shape {} is not the expected one {}."
.format(deriv_value.shape, expected_deriv_shape))
if np.issubdtype(deriv_value.dtype, np.complexfloating):
y = y.astype(complex, copy=False)
validated_bc.append((deriv_order, deriv_value))
return validated_bc, y
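# Illustrative sketches (hypothetical helpers, not part of the original scipy
# module). The first shows that Akima1DInterpolator passes exactly through the
# supplied points and, because extrapolation is disabled, returns NaN outside
# the data range. The second shows that the 'natural' boundary condition
# zeroes the second derivative at both ends and is equivalent to passing
# ((2, 0.0), (2, 0.0)) explicitly, as stated in the CubicSpline docstring.
def _demo_akima():
    x = np.arange(6, dtype=float)
    y = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.0])
    ak = Akima1DInterpolator(x, y)
    assert np.allclose(ak(x), y)   # interpolates the data exactly
    assert np.isnan(ak(-1.0))      # extrapolate=False -> NaN out of range
    return ak
def _demo_cubic_spline_natural_bc():
    x = np.linspace(0.0, 2.0 * np.pi, 10)
    y = np.sin(x)
    cs_named = CubicSpline(x, y, bc_type='natural')
    cs_tuple = CubicSpline(x, y, bc_type=((2, 0.0), (2, 0.0)))
    assert np.allclose(cs_named(x[[0, -1]], 2), 0.0)  # S'' vanishes at the ends
    assert np.allclose(cs_named.c, cs_tuple.c)        # same spline either way
    return cs_named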
| 38.823598
| 117
| 0.553817
|
f3786afadb53b75434e4fc87831d95eb8d5bee09
| 1,195
|
py
|
Python
|
AndBug/lib/andbug/errors.py
|
msayagh/ExecutedAndroidByteCode
|
111aa9738f217aaa7c963792219344438b7aefb0
|
[
"W3C"
] | null | null | null |
AndBug/lib/andbug/errors.py
|
msayagh/ExecutedAndroidByteCode
|
111aa9738f217aaa7c963792219344438b7aefb0
|
[
"W3C"
] | null | null | null |
AndBug/lib/andbug/errors.py
|
msayagh/ExecutedAndroidByteCode
|
111aa9738f217aaa7c963792219344438b7aefb0
|
[
"W3C"
] | null | null | null |
## Copyright 2011, IOActive, Inc. All rights reserved.
##
## AndBug is free software: you can redistribute it and/or modify it under
## the terms of version 3 of the GNU Lesser General Public License as
## published by the Free Software Foundation.
##
## AndBug is distributed in the hope that it will be useful, but WITHOUT ANY
## WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
## FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
## more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with AndBug. If not, see <http://www.gnu.org/licenses/>.
import sys
class UserError(Exception):
'indicates an error in how AndBug was used'
pass
class OptionError(UserError):
'indicates an error parsing an option supplied to a command'
pass
class ConfigError(UserError):
'indicates an error in the configuration of AndBug'
pass
class DependencyError(UserError):
'indicates that an optional dependency was not found'
pass
class VoidError(UserError):
'indicates a process returned a nil object'
def perr(*args):
    print(' '.join(map(str, args)), file=sys.stderr)
| 30.641026
| 77
| 0.734728
|
2ced0214282ba23443c71277b2ff649347069258
| 4,855
|
py
|
Python
|
docs/conf.py
|
vcutrona/elasticpedia
|
fcb23008a6fedaaa25c876a58b0365e9b273e147
|
[
"Apache-2.0"
] | 3
|
2020-03-31T15:57:21.000Z
|
2020-04-02T16:09:23.000Z
|
docs/conf.py
|
vcutrona/elasticpedia
|
fcb23008a6fedaaa25c876a58b0365e9b273e147
|
[
"Apache-2.0"
] | null | null | null |
docs/conf.py
|
vcutrona/elasticpedia
|
fcb23008a6fedaaa25c876a58b0365e9b273e147
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# elasticpedia documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import elasticpedia
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Elasticpedia'
copyright = "2020, Vincenzo Cutrona"
author = "Vincenzo Cutrona"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = elasticpedia.__version__
# The full version, including alpha/beta/rc tags.
release = elasticpedia.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'elasticpediadoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'elasticpedia.tex',
'Elasticpedia Documentation',
'Vincenzo Cutrona', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'elasticpedia',
'Elasticpedia Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'elasticpedia',
'Elasticpedia Documentation',
author,
'elasticpedia',
'One line description of project.',
'Miscellaneous'),
]
| 29.785276
| 77
| 0.68898
|
026711b8624dfa58715060d83c268cef2cff296d
| 2,848
|
py
|
Python
|
tests/cli/test_debug.py
|
dnimtheory/WordOps
|
82fc71f5f563df0e4249cc178f768f6cf6c005f6
|
[
"MIT"
] | 2
|
2019-09-03T03:39:40.000Z
|
2021-04-22T12:09:50.000Z
|
tests/cli/test_debug.py
|
BreezeRo/WordOps
|
3c5cb8ba0ed8d619cddb170386a07102cb385727
|
[
"MIT"
] | null | null | null |
tests/cli/test_debug.py
|
BreezeRo/WordOps
|
3c5cb8ba0ed8d619cddb170386a07102cb385727
|
[
"MIT"
] | 2
|
2021-01-02T07:49:51.000Z
|
2022-03-26T15:58:50.000Z
|
from wo.utils import test
from wo.cli.main import get_test_app
class CliTestCaseDebug(test.WOTestCase):
def test_wo_cli(self):
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_debug_stop(self):
self.app = get_test_app(argv=['debug', '--stop'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_debug_start(self):
self.app = get_test_app(argv=['debug', '--start'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_debug_php(self):
self.app = get_test_app(argv=['debug', '--php'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_debug_nginx(self):
self.app = get_test_app(argv=['debug', '--nginx'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_debug_rewrite(self):
self.app = get_test_app(argv=['debug', '--rewrite'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_debug_fpm(self):
self.app = get_test_app(argv=['debug', '--fpm'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_debug_mysql(self):
self.app = get_test_app(argv=['debug', '--mysql'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_debug_import_slow_log_interval(self):
self.app = get_test_app(argv=['debug', '--mysql',
'--import-slow-log-interval'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_debug_site_name_mysql(self):
self.app = get_test_app(argv=['debug', 'example3.com', '--mysql'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_debug_site_name_wp(self):
self.app = get_test_app(argv=['debug', 'example4.com', '--wp'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_debug_site_name_nginx(self):
self.app = get_test_app(argv=['debug', 'example4.com', '--nginx'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_debug_site_name_start(self):
self.app = get_test_app(argv=['debug', 'example1.com', '--start'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_debug_site_name_stop(self):
self.app = get_test_app(argv=['debug', 'example1.com', '--stop'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_debug_site_name_rewrite(self):
self.app = get_test_app(argv=['debug', 'example1.com', '--rewrite'])
self.app.setup()
self.app.run()
self.app.close()
| 29.666667
| 76
| 0.580407
|
c7fe4d59939d44399c28cb012ae9e78dc888d648
| 2,656
|
py
|
Python
|
src/openprocurement/tender/competitivedialogue/procedure/views/stage2/bid.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 10
|
2020-02-18T01:56:21.000Z
|
2022-03-28T00:32:57.000Z
|
src/openprocurement/tender/competitivedialogue/procedure/views/stage2/bid.py
|
quintagroup/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 26
|
2018-07-16T09:30:44.000Z
|
2021-02-02T17:51:30.000Z
|
src/openprocurement/tender/competitivedialogue/procedure/views/stage2/bid.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 15
|
2019-08-08T10:50:47.000Z
|
2022-02-05T14:13:36.000Z
|
from openprocurement.tender.openeu.procedure.views.bid import TenderBidResource as BaseResourceEU
from openprocurement.tender.openua.procedure.views.bid import TenderBidResource as BaseResourceUA
from openprocurement.tender.competitivedialogue.constants import STAGE_2_UA_TYPE, STAGE_2_EU_TYPE
from openprocurement.tender.competitivedialogue.procedure.validation import validate_firm_to_create_bid
from openprocurement.tender.openeu.procedure.models.bid import PostBid as PostBidEU
from openprocurement.tender.openua.procedure.models.bid import PostBid as PostBidUA
from openprocurement.tender.openeu.procedure.validation import (
validate_post_bid_status,
)
from openprocurement.tender.core.procedure.validation import (
validate_bid_accreditation_level,
validate_input_data,
validate_data_documents,
validate_bid_operation_period,
validate_bid_operation_not_in_tendering,
)
from openprocurement.api.utils import json_view
from cornice.resource import resource
from logging import getLogger
LOGGER = getLogger(__name__)
@resource(
name="{}:Tender Bids".format(STAGE_2_EU_TYPE),
collection_path="/tenders/{tender_id}/bids",
path="/tenders/{tender_id}/bids/{bid_id}",
procurementMethodType=STAGE_2_EU_TYPE,
description="Competitive Dialogue Stage2EU bids",
)
class CompetitiveDialogueStage2EUBidResource(BaseResourceEU):
@json_view(
content_type="application/json",
permission="create_bid",
validators=(
validate_bid_accreditation_level,
validate_bid_operation_not_in_tendering,
validate_bid_operation_period,
validate_input_data(PostBidEU),
validate_post_bid_status,
validate_firm_to_create_bid,
validate_data_documents,
),
)
def collection_post(self):
return super().collection_post()
@resource(
name="{}:Tender Bids".format(STAGE_2_UA_TYPE),
collection_path="/tenders/{tender_id}/bids",
path="/tenders/{tender_id}/bids/{bid_id}",
procurementMethodType=STAGE_2_UA_TYPE,
description="Competitive Dialogue Stage2 UA bids",
)
class CompetitiveDialogueStage2UABidResource(BaseResourceUA):
@json_view(
content_type="application/json",
permission="create_bid",
validators=(
validate_bid_accreditation_level,
validate_bid_operation_not_in_tendering,
validate_bid_operation_period,
validate_input_data(PostBidUA),
validate_firm_to_create_bid,
validate_data_documents,
),
)
def collection_post(self):
return super().collection_post()
| 37.408451
| 103
| 0.752636
|
04e3808516792ff017c8da2f7469b166d652c4c8
| 241
|
py
|
Python
|
bookmarks/actions/admin.py
|
xaldey/bookmarks
|
e2cbed38a06f343b2d61ab7eac08388200efcb04
|
[
"Apache-2.0"
] | null | null | null |
bookmarks/actions/admin.py
|
xaldey/bookmarks
|
e2cbed38a06f343b2d61ab7eac08388200efcb04
|
[
"Apache-2.0"
] | null | null | null |
bookmarks/actions/admin.py
|
xaldey/bookmarks
|
e2cbed38a06f343b2d61ab7eac08388200efcb04
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import Action
@admin.register(Action)
class ActionAdmin(admin.ModelAdmin):
list_display = ('user', 'verb', 'target', 'created')
list_filter = ('created',)
    search_fields = ('verb',)
| 24.1
| 56
| 0.697095
|
ddc16e101fec45504b2ce3013ba78cce5777b92c
| 21,052
|
py
|
Python
|
netbox/netbox/settings.py
|
msrt500/netbox
|
7bd853e87b51470c50663dedfc07b29774f2707b
|
[
"Apache-2.0"
] | 1
|
2021-03-29T18:17:16.000Z
|
2021-03-29T18:17:16.000Z
|
netbox/netbox/settings.py
|
msrt500/netbox
|
7bd853e87b51470c50663dedfc07b29774f2707b
|
[
"Apache-2.0"
] | null | null | null |
netbox/netbox/settings.py
|
msrt500/netbox
|
7bd853e87b51470c50663dedfc07b29774f2707b
|
[
"Apache-2.0"
] | null | null | null |
import importlib
import logging
import os
import platform
import re
import socket
import warnings
from urllib.parse import urlsplit
from django.contrib.messages import constants as messages
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.validators import URLValidator
#
# Environment setup
#
VERSION = '2.10.9-dev'
# Hostname
HOSTNAME = platform.node()
# Set the base directory two levels up
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Validate Python version
if platform.python_version_tuple() < ('3', '6'):
raise RuntimeError(
"NetBox requires Python 3.6 or higher (current: Python {})".format(platform.python_version())
)
#
# Configuration import
#
# Import configuration parameters
try:
from netbox import configuration
except ModuleNotFoundError as e:
if getattr(e, 'name') == 'configuration':
raise ImproperlyConfigured(
"Configuration file is not present. Please define netbox/netbox/configuration.py per the documentation."
)
raise
# Enforce required configuration parameters
for parameter in ['ALLOWED_HOSTS', 'DATABASE', 'SECRET_KEY', 'REDIS']:
if not hasattr(configuration, parameter):
raise ImproperlyConfigured(
"Required parameter {} is missing from configuration.py.".format(parameter)
)
# Set required parameters
ALLOWED_HOSTS = getattr(configuration, 'ALLOWED_HOSTS')
DATABASE = getattr(configuration, 'DATABASE')
REDIS = getattr(configuration, 'REDIS')
SECRET_KEY = getattr(configuration, 'SECRET_KEY')
# Set optional parameters
ADMINS = getattr(configuration, 'ADMINS', [])
ALLOWED_URL_SCHEMES = getattr(configuration, 'ALLOWED_URL_SCHEMES', (
'file', 'ftp', 'ftps', 'http', 'https', 'irc', 'mailto', 'sftp', 'ssh', 'tel', 'telnet', 'tftp', 'vnc', 'xmpp',
))
BANNER_BOTTOM = getattr(configuration, 'BANNER_BOTTOM', '')
BANNER_LOGIN = getattr(configuration, 'BANNER_LOGIN', '')
BANNER_TOP = getattr(configuration, 'BANNER_TOP', '')
BASE_PATH = getattr(configuration, 'BASE_PATH', '')
if BASE_PATH:
BASE_PATH = BASE_PATH.strip('/') + '/' # Enforce trailing slash only
CACHE_TIMEOUT = getattr(configuration, 'CACHE_TIMEOUT', 900)
CHANGELOG_RETENTION = getattr(configuration, 'CHANGELOG_RETENTION', 90)
CORS_ORIGIN_ALLOW_ALL = getattr(configuration, 'CORS_ORIGIN_ALLOW_ALL', False)
CORS_ORIGIN_REGEX_WHITELIST = getattr(configuration, 'CORS_ORIGIN_REGEX_WHITELIST', [])
CORS_ORIGIN_WHITELIST = getattr(configuration, 'CORS_ORIGIN_WHITELIST', [])
DATE_FORMAT = getattr(configuration, 'DATE_FORMAT', 'N j, Y')
DATETIME_FORMAT = getattr(configuration, 'DATETIME_FORMAT', 'N j, Y g:i a')
DEBUG = getattr(configuration, 'DEBUG', False)
DEVELOPER = getattr(configuration, 'DEVELOPER', False)
DOCS_ROOT = getattr(configuration, 'DOCS_ROOT', os.path.join(os.path.dirname(BASE_DIR), 'docs'))
EMAIL = getattr(configuration, 'EMAIL', {})
ENFORCE_GLOBAL_UNIQUE = getattr(configuration, 'ENFORCE_GLOBAL_UNIQUE', False)
EXEMPT_VIEW_PERMISSIONS = getattr(configuration, 'EXEMPT_VIEW_PERMISSIONS', [])
HTTP_PROXIES = getattr(configuration, 'HTTP_PROXIES', None)
INTERNAL_IPS = getattr(configuration, 'INTERNAL_IPS', ('127.0.0.1', '::1'))
LOGGING = getattr(configuration, 'LOGGING', {})
LOGIN_REQUIRED = getattr(configuration, 'LOGIN_REQUIRED', False)
LOGIN_TIMEOUT = getattr(configuration, 'LOGIN_TIMEOUT', None)
MAINTENANCE_MODE = getattr(configuration, 'MAINTENANCE_MODE', False)
MAPS_URL = getattr(configuration, 'MAPS_URL', 'https://maps.google.com/?q=')
MAX_PAGE_SIZE = getattr(configuration, 'MAX_PAGE_SIZE', 1000)
MEDIA_ROOT = getattr(configuration, 'MEDIA_ROOT', os.path.join(BASE_DIR, 'media')).rstrip('/')
METRICS_ENABLED = getattr(configuration, 'METRICS_ENABLED', False)
NAPALM_ARGS = getattr(configuration, 'NAPALM_ARGS', {})
NAPALM_PASSWORD = getattr(configuration, 'NAPALM_PASSWORD', '')
NAPALM_TIMEOUT = getattr(configuration, 'NAPALM_TIMEOUT', 30)
NAPALM_USERNAME = getattr(configuration, 'NAPALM_USERNAME', '')
PAGINATE_COUNT = getattr(configuration, 'PAGINATE_COUNT', 50)
PLUGINS = getattr(configuration, 'PLUGINS', [])
PLUGINS_CONFIG = getattr(configuration, 'PLUGINS_CONFIG', {})
PREFER_IPV4 = getattr(configuration, 'PREFER_IPV4', False)
RACK_ELEVATION_DEFAULT_UNIT_HEIGHT = getattr(configuration, 'RACK_ELEVATION_DEFAULT_UNIT_HEIGHT', 22)
RACK_ELEVATION_DEFAULT_UNIT_WIDTH = getattr(configuration, 'RACK_ELEVATION_DEFAULT_UNIT_WIDTH', 220)
REMOTE_AUTH_AUTO_CREATE_USER = getattr(configuration, 'REMOTE_AUTH_AUTO_CREATE_USER', False)
REMOTE_AUTH_BACKEND = getattr(configuration, 'REMOTE_AUTH_BACKEND', 'netbox.authentication.RemoteUserBackend')
REMOTE_AUTH_DEFAULT_GROUPS = getattr(configuration, 'REMOTE_AUTH_DEFAULT_GROUPS', [])
REMOTE_AUTH_DEFAULT_PERMISSIONS = getattr(configuration, 'REMOTE_AUTH_DEFAULT_PERMISSIONS', {})
REMOTE_AUTH_ENABLED = getattr(configuration, 'REMOTE_AUTH_ENABLED', False)
REMOTE_AUTH_HEADER = getattr(configuration, 'REMOTE_AUTH_HEADER', 'HTTP_REMOTE_USER')
RELEASE_CHECK_URL = getattr(configuration, 'RELEASE_CHECK_URL', None)
RELEASE_CHECK_TIMEOUT = getattr(configuration, 'RELEASE_CHECK_TIMEOUT', 24 * 3600)
REPORTS_ROOT = getattr(configuration, 'REPORTS_ROOT', os.path.join(BASE_DIR, 'reports')).rstrip('/')
RQ_DEFAULT_TIMEOUT = getattr(configuration, 'RQ_DEFAULT_TIMEOUT', 300)
SCRIPTS_ROOT = getattr(configuration, 'SCRIPTS_ROOT', os.path.join(BASE_DIR, 'scripts')).rstrip('/')
SESSION_FILE_PATH = getattr(configuration, 'SESSION_FILE_PATH', None)
SHORT_DATE_FORMAT = getattr(configuration, 'SHORT_DATE_FORMAT', 'Y-m-d')
SHORT_DATETIME_FORMAT = getattr(configuration, 'SHORT_DATETIME_FORMAT', 'Y-m-d H:i')
SHORT_TIME_FORMAT = getattr(configuration, 'SHORT_TIME_FORMAT', 'H:i:s')
STORAGE_BACKEND = getattr(configuration, 'STORAGE_BACKEND', None)
STORAGE_CONFIG = getattr(configuration, 'STORAGE_CONFIG', {})
TIME_FORMAT = getattr(configuration, 'TIME_FORMAT', 'g:i a')
TIME_ZONE = getattr(configuration, 'TIME_ZONE', 'UTC')
# Validate update repo URL and timeout
if RELEASE_CHECK_URL:
validator = URLValidator(
message=(
"RELEASE_CHECK_URL must be a valid API URL. Example: "
"https://api.github.com/repos/netbox-community/netbox"
)
)
try:
validator(RELEASE_CHECK_URL)
except ValidationError as err:
raise ImproperlyConfigured(str(err))
# Enforce a minimum cache timeout for update checks
if RELEASE_CHECK_TIMEOUT < 3600:
raise ImproperlyConfigured("RELEASE_CHECK_TIMEOUT has to be at least 3600 seconds (1 hour)")
#
# Database
#
# Only PostgreSQL is supported
if METRICS_ENABLED:
DATABASE.update({
'ENGINE': 'django_prometheus.db.backends.postgresql'
})
else:
DATABASE.update({
'ENGINE': 'django.db.backends.postgresql'
})
DATABASES = {
'default': DATABASE,
}
#
# Media storage
#
if STORAGE_BACKEND is not None:
DEFAULT_FILE_STORAGE = STORAGE_BACKEND
# django-storages
if STORAGE_BACKEND.startswith('storages.'):
try:
import storages.utils
except ModuleNotFoundError as e:
if getattr(e, 'name') == 'storages':
raise ImproperlyConfigured(
f"STORAGE_BACKEND is set to {STORAGE_BACKEND} but django-storages is not present. It can be "
f"installed by running 'pip install django-storages'."
)
raise e
# Monkey-patch django-storages to fetch settings from STORAGE_CONFIG
def _setting(name, default=None):
if name in STORAGE_CONFIG:
return STORAGE_CONFIG[name]
return globals().get(name, default)
storages.utils.setting = _setting
if STORAGE_CONFIG and STORAGE_BACKEND is None:
warnings.warn(
"STORAGE_CONFIG has been set in configuration.py but STORAGE_BACKEND is not defined. STORAGE_CONFIG will be "
"ignored."
)
#
# Redis
#
# Background task queuing
if 'tasks' not in REDIS:
raise ImproperlyConfigured(
"REDIS section in configuration.py is missing the 'tasks' subsection."
)
TASKS_REDIS = REDIS['tasks']
TASKS_REDIS_HOST = TASKS_REDIS.get('HOST', 'localhost')
TASKS_REDIS_PORT = TASKS_REDIS.get('PORT', 6379)
TASKS_REDIS_SENTINELS = TASKS_REDIS.get('SENTINELS', [])
TASKS_REDIS_USING_SENTINEL = all([
isinstance(TASKS_REDIS_SENTINELS, (list, tuple)),
len(TASKS_REDIS_SENTINELS) > 0
])
TASKS_REDIS_SENTINEL_SERVICE = TASKS_REDIS.get('SENTINEL_SERVICE', 'default')
TASKS_REDIS_SENTINEL_TIMEOUT = TASKS_REDIS.get('SENTINEL_TIMEOUT', 10)
TASKS_REDIS_PASSWORD = TASKS_REDIS.get('PASSWORD', '')
TASKS_REDIS_DATABASE = TASKS_REDIS.get('DATABASE', 0)
TASKS_REDIS_SSL = TASKS_REDIS.get('SSL', False)
# Caching
if 'caching' not in REDIS:
raise ImproperlyConfigured(
"REDIS section in configuration.py is missing caching subsection."
)
CACHING_REDIS = REDIS['caching']
CACHING_REDIS_HOST = CACHING_REDIS.get('HOST', 'localhost')
CACHING_REDIS_PORT = CACHING_REDIS.get('PORT', 6379)
CACHING_REDIS_SENTINELS = CACHING_REDIS.get('SENTINELS', [])
CACHING_REDIS_USING_SENTINEL = all([
isinstance(CACHING_REDIS_SENTINELS, (list, tuple)),
len(CACHING_REDIS_SENTINELS) > 0
])
CACHING_REDIS_SENTINEL_SERVICE = CACHING_REDIS.get('SENTINEL_SERVICE', 'default')
CACHING_REDIS_PASSWORD = CACHING_REDIS.get('PASSWORD', '')
CACHING_REDIS_DATABASE = CACHING_REDIS.get('DATABASE', 0)
CACHING_REDIS_SSL = CACHING_REDIS.get('SSL', False)
#
# Sessions
#
if LOGIN_TIMEOUT is not None:
# Django default is 1209600 seconds (14 days)
SESSION_COOKIE_AGE = LOGIN_TIMEOUT
if SESSION_FILE_PATH is not None:
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
#
# Email
#
EMAIL_HOST = EMAIL.get('SERVER')
EMAIL_HOST_USER = EMAIL.get('USERNAME')
EMAIL_HOST_PASSWORD = EMAIL.get('PASSWORD')
EMAIL_PORT = EMAIL.get('PORT', 25)
EMAIL_SSL_CERTFILE = EMAIL.get('SSL_CERTFILE')
EMAIL_SSL_KEYFILE = EMAIL.get('SSL_KEYFILE')
EMAIL_SUBJECT_PREFIX = '[NetBox] '
EMAIL_USE_SSL = EMAIL.get('USE_SSL', False)
EMAIL_USE_TLS = EMAIL.get('USE_TLS', False)
EMAIL_TIMEOUT = EMAIL.get('TIMEOUT', 10)
SERVER_EMAIL = EMAIL.get('FROM_EMAIL')
#
# Django
#
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'cacheops',
'corsheaders',
'debug_toolbar',
'django_filters',
'django_tables2',
'django_prometheus',
'mptt',
'rest_framework',
'taggit',
'timezone_field',
'circuits',
'dcim',
'ipam',
'extras',
'secrets',
'tenancy',
'users',
'utilities',
'virtualization',
'django_rq', # Must come after extras to allow overriding management commands
'drf_yasg',
]
# Middleware
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'netbox.middleware.ExceptionHandlingMiddleware',
'netbox.middleware.RemoteUserMiddleware',
'netbox.middleware.LoginRequiredMiddleware',
'netbox.middleware.APIVersionMiddleware',
'netbox.middleware.ObjectChangeMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware',
]
ROOT_URLCONF = 'netbox.urls'
TEMPLATES_DIR = BASE_DIR + '/templates'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'netbox.context_processors.settings_and_registry',
],
},
},
]
# Set up authentication backends
AUTHENTICATION_BACKENDS = [
REMOTE_AUTH_BACKEND,
'netbox.authentication.ObjectPermissionBackend',
]
# Internationalization
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_TZ = True
# WSGI
WSGI_APPLICATION = 'netbox.wsgi.application'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
USE_X_FORWARDED_HOST = True
X_FRAME_OPTIONS = 'SAMEORIGIN'
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = BASE_DIR + '/static'
STATIC_URL = '/{}static/'.format(BASE_PATH)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "project-static"),
)
# Media
MEDIA_URL = '/{}media/'.format(BASE_PATH)
# Disable default limit of 1000 fields per request. Needed for bulk deletion of objects. (Added in Django 1.10.)
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
# Messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
# Authentication URLs
LOGIN_URL = '/{}login/'.format(BASE_PATH)
CSRF_TRUSTED_ORIGINS = ALLOWED_HOSTS
# Exclude potentially sensitive models from wildcard view exemption. These may still be exempted
# by specifying the model individually in the EXEMPT_VIEW_PERMISSIONS configuration parameter.
EXEMPT_EXCLUDE_MODELS = (
('auth', 'group'),
('auth', 'user'),
('users', 'objectpermission'),
)
#
# Caching
#
if CACHING_REDIS_USING_SENTINEL:
CACHEOPS_SENTINEL = {
'locations': CACHING_REDIS_SENTINELS,
'service_name': CACHING_REDIS_SENTINEL_SERVICE,
'db': CACHING_REDIS_DATABASE,
'password': CACHING_REDIS_PASSWORD,
}
else:
if CACHING_REDIS_SSL:
REDIS_CACHE_CON_STRING = 'rediss://'
else:
REDIS_CACHE_CON_STRING = 'redis://'
if CACHING_REDIS_PASSWORD:
REDIS_CACHE_CON_STRING = '{}:{}@'.format(REDIS_CACHE_CON_STRING, CACHING_REDIS_PASSWORD)
REDIS_CACHE_CON_STRING = '{}{}:{}/{}'.format(
REDIS_CACHE_CON_STRING,
CACHING_REDIS_HOST,
CACHING_REDIS_PORT,
CACHING_REDIS_DATABASE
)
CACHEOPS_REDIS = REDIS_CACHE_CON_STRING
if not CACHE_TIMEOUT:
CACHEOPS_ENABLED = False
else:
CACHEOPS_ENABLED = True
CACHEOPS_DEFAULTS = {
'timeout': CACHE_TIMEOUT
}
CACHEOPS = {
'auth.user': {'ops': 'get', 'timeout': 60 * 15},
'auth.*': {'ops': ('fetch', 'get')},
'auth.permission': {'ops': 'all'},
'circuits.*': {'ops': 'all'},
'dcim.inventoryitem': None, # MPTT models are exempt due to raw SQL
'dcim.region': None, # MPTT models are exempt due to raw SQL
'dcim.rackgroup': None, # MPTT models are exempt due to raw SQL
'dcim.*': {'ops': 'all'},
'ipam.*': {'ops': 'all'},
'extras.*': {'ops': 'all'},
'secrets.*': {'ops': 'all'},
'users.*': {'ops': 'all'},
'tenancy.tenantgroup': None, # MPTT models are exempt due to raw SQL
'tenancy.*': {'ops': 'all'},
'virtualization.*': {'ops': 'all'},
}
CACHEOPS_DEGRADE_ON_FAILURE = True
#
# Django Prometheus
#
PROMETHEUS_EXPORT_MIGRATIONS = False
#
# Django filters
#
FILTERS_NULL_CHOICE_LABEL = 'None'
FILTERS_NULL_CHOICE_VALUE = 'null'
#
# Django REST framework (API)
#
REST_FRAMEWORK_VERSION = VERSION.rsplit('.', 1)[0] # Use major.minor as API version
REST_FRAMEWORK = {
'ALLOWED_VERSIONS': [REST_FRAMEWORK_VERSION],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'netbox.api.authentication.TokenAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
),
'DEFAULT_METADATA_CLASS': 'netbox.api.metadata.BulkOperationMetadata',
'DEFAULT_PAGINATION_CLASS': 'netbox.api.pagination.OptionalLimitOffsetPagination',
'DEFAULT_PERMISSION_CLASSES': (
'netbox.api.authentication.TokenPermissions',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'netbox.api.renderers.FormlessBrowsableAPIRenderer',
),
'DEFAULT_VERSION': REST_FRAMEWORK_VERSION,
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.AcceptHeaderVersioning',
'PAGE_SIZE': PAGINATE_COUNT,
'SCHEMA_COERCE_METHOD_NAMES': {
# Default mappings
'retrieve': 'read',
'destroy': 'delete',
# Custom operations
'bulk_destroy': 'bulk_delete',
},
'VIEW_NAME_FUNCTION': 'utilities.api.get_view_name',
}
#
# drf_yasg (OpenAPI/Swagger)
#
SWAGGER_SETTINGS = {
'DEFAULT_AUTO_SCHEMA_CLASS': 'utilities.custom_inspectors.NetBoxSwaggerAutoSchema',
'DEFAULT_FIELD_INSPECTORS': [
'utilities.custom_inspectors.CustomFieldsDataFieldInspector',
'utilities.custom_inspectors.JSONFieldInspector',
'utilities.custom_inspectors.NullableBooleanFieldInspector',
'utilities.custom_inspectors.ChoiceFieldInspector',
'utilities.custom_inspectors.SerializedPKRelatedFieldInspector',
'drf_yasg.inspectors.CamelCaseJSONFilter',
'drf_yasg.inspectors.ReferencingSerializerInspector',
'drf_yasg.inspectors.RelatedFieldInspector',
'drf_yasg.inspectors.ChoiceFieldInspector',
'drf_yasg.inspectors.FileFieldInspector',
'drf_yasg.inspectors.DictFieldInspector',
'drf_yasg.inspectors.SerializerMethodFieldInspector',
'drf_yasg.inspectors.SimpleFieldInspector',
'drf_yasg.inspectors.StringDefaultFieldInspector',
],
'DEFAULT_FILTER_INSPECTORS': [
'drf_yasg.inspectors.CoreAPICompatInspector',
],
'DEFAULT_INFO': 'netbox.urls.openapi_info',
'DEFAULT_MODEL_DEPTH': 1,
'DEFAULT_PAGINATOR_INSPECTORS': [
'utilities.custom_inspectors.NullablePaginatorInspector',
'drf_yasg.inspectors.DjangoRestResponsePagination',
'drf_yasg.inspectors.CoreAPICompatInspector',
],
'SECURITY_DEFINITIONS': {
'Bearer': {
'type': 'apiKey',
'name': 'Authorization',
'in': 'header',
}
},
'VALIDATOR_URL': None,
}
#
# Django RQ (Webhooks backend)
#
if TASKS_REDIS_USING_SENTINEL:
RQ_PARAMS = {
'SENTINELS': TASKS_REDIS_SENTINELS,
'MASTER_NAME': TASKS_REDIS_SENTINEL_SERVICE,
'DB': TASKS_REDIS_DATABASE,
'PASSWORD': TASKS_REDIS_PASSWORD,
'SOCKET_TIMEOUT': None,
'CONNECTION_KWARGS': {
'socket_connect_timeout': TASKS_REDIS_SENTINEL_TIMEOUT
},
}
else:
RQ_PARAMS = {
'HOST': TASKS_REDIS_HOST,
'PORT': TASKS_REDIS_PORT,
'DB': TASKS_REDIS_DATABASE,
'PASSWORD': TASKS_REDIS_PASSWORD,
'SSL': TASKS_REDIS_SSL,
'DEFAULT_TIMEOUT': RQ_DEFAULT_TIMEOUT,
}
RQ_QUEUES = {
'default': RQ_PARAMS, # Webhooks
'check_releases': RQ_PARAMS,
}
#
# NetBox internal settings
#
# Secrets
SECRETS_MIN_PUBKEY_SIZE = 2048
# Pagination
PER_PAGE_DEFAULTS = [
25, 50, 100, 250, 500, 1000
]
if PAGINATE_COUNT not in PER_PAGE_DEFAULTS:
PER_PAGE_DEFAULTS.append(PAGINATE_COUNT)
PER_PAGE_DEFAULTS = sorted(PER_PAGE_DEFAULTS)
#
# Plugins
#
for plugin_name in PLUGINS:
# Import plugin module
try:
plugin = importlib.import_module(plugin_name)
except ModuleNotFoundError as e:
if getattr(e, 'name') == plugin_name:
raise ImproperlyConfigured(
"Unable to import plugin {}: Module not found. Check that the plugin module has been installed within the "
"correct Python environment.".format(plugin_name)
)
raise e
# Determine plugin config and add to INSTALLED_APPS.
try:
plugin_config = plugin.config
INSTALLED_APPS.append("{}.{}".format(plugin_config.__module__, plugin_config.__name__))
except AttributeError:
raise ImproperlyConfigured(
"Plugin {} does not provide a 'config' variable. This should be defined in the plugin's __init__.py file "
"and point to the PluginConfig subclass.".format(plugin_name)
)
# Validate user-provided configuration settings and assign defaults
if plugin_name not in PLUGINS_CONFIG:
PLUGINS_CONFIG[plugin_name] = {}
plugin_config.validate(PLUGINS_CONFIG[plugin_name], VERSION)
# Add middleware
plugin_middleware = plugin_config.middleware
if plugin_middleware and type(plugin_middleware) in (list, tuple):
MIDDLEWARE.extend(plugin_middleware)
# Apply cacheops config
if type(plugin_config.caching_config) is not dict:
raise ImproperlyConfigured(
"Plugin {} caching_config must be a dictionary.".format(plugin_name)
)
CACHEOPS.update({
"{}.{}".format(plugin_name, key): value for key, value in plugin_config.caching_config.items()
})
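# Illustrative sketch (hypothetical, not part of the original settings module):
# a bare stand-in showing just the attributes the plugin-loading loop above
# reads from each plugin's `config` object. A real plugin would instead expose
# a PluginConfig subclass as `config` in its package __init__.py; the class
# name and attribute values below are made up for illustration.
class _ExamplePluginConfig:
    # "<__module__>.<__name__>" of this class is what gets appended to
    # INSTALLED_APPS by the loop above
    middleware = []        # optional middleware classes, extended into MIDDLEWARE
    caching_config = {}    # merged into CACHEOPS as "<plugin_name>.<key>"
    @classmethod
    def validate(cls, user_config, netbox_version):
        # called with PLUGINS_CONFIG[plugin_name] and VERSION; a real plugin
        # would check user_config against its defaults here
        pass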
| 33.257504
| 123
| 0.713519
|
a38e555ac59d765cc49fba71ec0acb813da6a558
| 5,917
|
py
|
Python
|
apps/node_man/periodic_tasks/sync_proc_status_task.py
|
ZhuoZhuoCrayon/bk-nodeman
|
76cb71fcc971c2a0c2be161fcbd6b019d4a7a8ab
|
[
"MIT"
] | null | null | null |
apps/node_man/periodic_tasks/sync_proc_status_task.py
|
ZhuoZhuoCrayon/bk-nodeman
|
76cb71fcc971c2a0c2be161fcbd6b019d4a7a8ab
|
[
"MIT"
] | null | null | null |
apps/node_man/periodic_tasks/sync_proc_status_task.py
|
ZhuoZhuoCrayon/bk-nodeman
|
76cb71fcc971c2a0c2be161fcbd6b019d4a7a8ab
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from celery.schedules import crontab
from celery.task import periodic_task, task
from apps.component.esbclient import client_v2
from apps.node_man import constants as const
from apps.node_man.models import GsePluginDesc, Host, ProcessStatus
from apps.utils.periodic_task import calculate_countdown
from common.log import logger
def get_version(version_str):
version = const.VERSION_PATTERN.search(version_str)
return version.group() if version else ""
def query_proc_status(start=0, limit=const.QUERY_PROC_STATUS_HOST_LENS):
kwargs = {"meta": {"namespace": "nodeman"}, "page": {"start": start, "limit": limit}}
data = client_v2.gse.sync_proc_status(kwargs)
return data.get("count") or 0, data.get("proc_infos") or []
@task(queue="default", ignore_result=True)
def update_or_create_proc_status(task_id, sync_proc_list, start):
logger.info(f"{task_id} | Sync process status start flag: {start}")
_, proc_infos = query_proc_status(start)
bk_host_ips = []
bk_cloud_ids = []
proc_name_list = []
host_proc_status_map = {}
for info in proc_infos:
if info["meta"]["name"] not in sync_proc_list:
continue
if info["meta"]["name"] not in proc_name_list:
proc_name_list.append(info["meta"]["name"])
bk_host_ips.append(info["host"]["ip"])
bk_cloud_ids.append(info["host"]["bk_cloud_id"])
host_proc_status_map[f'{info["host"]["ip"]}:{info["host"]["bk_cloud_id"]}'] = {
"version": get_version(info["version"]),
"status": const.PLUGIN_STATUS_DICT[info["status"]],
"is_auto": const.AutoStateType.AUTO if info["isauto"] else const.AutoStateType.UNAUTO,
"name": info["meta"]["name"],
}
    # Query hosts that already exist in the database
hosts = Host.objects.filter(inner_ip__in=bk_host_ips, bk_cloud_id__in=bk_cloud_ids).values(
"bk_host_id", "inner_ip", "bk_cloud_id"
)
bk_host_id_map = {}
for host in hosts:
bk_host_id_map[f"{host['inner_ip']}:{host['bk_cloud_id']}"] = host["bk_host_id"]
process_status_objs = ProcessStatus.objects.filter(
name__in=proc_name_list,
bk_host_id__in=bk_host_id_map.values(),
source_type=ProcessStatus.SourceType.DEFAULT,
proc_type=const.ProcType.PLUGIN,
is_latest=True,
).values("bk_host_id", "id", "name", "status")
host_proc_key__proc_map = {}
for item in process_status_objs:
host_proc_key__proc_map[f"{item['name']}:{item['bk_host_id']}"] = item
need_update_status = []
need_create_status = []
for host_cloud_key, host_proc_info in host_proc_status_map.items():
if host_cloud_key not in bk_host_id_map:
continue
db_proc_info = host_proc_key__proc_map.get(f'{host_proc_info["name"]}:{bk_host_id_map[host_cloud_key]}')
        # If the process status in the DB is "manually stopped" and the synced status is "terminated",
        # keep the manual-stop flag so that subscription operations can be exempted
if (
db_proc_info
and db_proc_info["status"] == const.ProcStateType.MANUAL_STOP
and host_proc_info["status"] == const.ProcStateType.TERMINATED
):
host_proc_info["status"] = db_proc_info["status"]
if db_proc_info:
# need update
obj = ProcessStatus(
pk=db_proc_info["id"],
status=host_proc_info["status"],
version=host_proc_info["version"],
is_auto=host_proc_info["is_auto"],
)
need_update_status.append(obj)
else:
# need create
obj = ProcessStatus(
status=host_proc_info["status"],
version=host_proc_info["version"],
is_auto=host_proc_info["is_auto"],
name=host_proc_info["name"],
source_type=ProcessStatus.SourceType.DEFAULT,
proc_type=const.ProcType.PLUGIN,
bk_host_id=bk_host_id_map[host_cloud_key],
is_latest=True,
)
need_create_status.append(obj)
ProcessStatus.objects.bulk_update(need_update_status, fields=["status", "version", "is_auto"])
ProcessStatus.objects.bulk_create(need_create_status)
logger.info(f"{task_id} | Sync process status start flag: {start} complate")
@periodic_task(
queue="default",
options={"queue": "default"},
run_every=crontab(hour="*", minute="*/15", day_of_week="*", day_of_month="*", month_of_year="*"),
)
def sync_proc_status_task():
sync_proc_list = GsePluginDesc.objects.filter(category=const.CategoryType.official).values_list("name", flat=True)
task_id = sync_proc_status_task.request.id
count, _ = query_proc_status(limit=1)
logger.info(f"{task_id} | sync host proc status count={count}.")
for start in range(0, count, const.QUERY_PROC_STATUS_HOST_LENS):
countdown = calculate_countdown(
count / const.QUERY_PROC_STATUS_HOST_LENS, start / const.QUERY_PROC_STATUS_HOST_LENS
)
logger.info(f"{task_id} | sync host proc status after {countdown} seconds")
update_or_create_proc_status.apply_async((task_id, sync_proc_list, start), countdown=countdown)
logger.info(f"{task_id} | sync host proc status complate.")
| 42.568345
| 118
| 0.676694
|
f6f20feb9d6b3c2f2fe81c84e8c7f2b522e7d047
| 615
|
py
|
Python
|
pyCHAMP/solver/dmc.py
|
NLESC-JCER/pyCHAMP
|
97523237b3521a426d664b6e2972257045ff8f5e
|
[
"Apache-2.0"
] | 4
|
2019-05-15T13:09:23.000Z
|
2021-03-28T09:10:11.000Z
|
pyCHAMP/solver/dmc.py
|
NLESC-JCER/pyCHAMP
|
97523237b3521a426d664b6e2972257045ff8f5e
|
[
"Apache-2.0"
] | 14
|
2019-04-23T15:05:07.000Z
|
2019-08-14T13:21:07.000Z
|
pyCHAMP/solver/dmc.py
|
NLESC-JCER/pyCHAMP
|
97523237b3521a426d664b6e2972257045ff8f5e
|
[
"Apache-2.0"
] | 1
|
2019-09-30T22:55:53.000Z
|
2019-09-30T22:55:53.000Z
|
from functools import partial
from pyCHAMP.solver.solver_base import SolverBase
class DMC(SolverBase):
def __init__(self, wf=None, sampler=None, optimizer=None):
SolverBase.__init__(self, wf, sampler, optimizer)
def sample(self, param):
wf_func = partial(self.wf.values, param)
self.sampler.set_wf(wf_func)
drift_func = partial(self.wf.drift_fd, param)
self.sampler.set_drift_func(drift_func)
energy_func = partial(self.wf.local_energy, param)
self.sampler.set_energy_func(energy_func)
pos = self.sampler.generate()
return pos
| 26.73913
| 62
| 0.692683
|
04ef5071c3157fd04b305942eeee5ca567e780e1
| 7,976
|
py
|
Python
|
ohapi/utils_fs.py
|
mcescalante/open-humans-api
|
e2438d6c04c4e8776b5402ed2807136b84ae94c1
|
[
"MIT"
] | null | null | null |
ohapi/utils_fs.py
|
mcescalante/open-humans-api
|
e2438d6c04c4e8776b5402ed2807136b84ae94c1
|
[
"MIT"
] | null | null | null |
ohapi/utils_fs.py
|
mcescalante/open-humans-api
|
e2438d6c04c4e8776b5402ed2807136b84ae94c1
|
[
"MIT"
] | null | null | null |
"""
Utility functions to sync and work with Open Humans data in a local filesystem.
"""
import csv
import hashlib
import logging
import os
import re
import arrow
from humanfriendly import format_size, parse_size
import requests
MAX_FILE_DEFAULT = parse_size('128m')
def strip_zip_suffix(filename):
if filename.endswith('.gz'):
return filename[:-3]
elif filename.endswith('.bz2'):
return filename[:-4]
else:
return filename
def guess_tags(filename):
tags = []
stripped_filename = strip_zip_suffix(filename)
if stripped_filename.endswith('.vcf'):
tags.append('vcf')
if stripped_filename.endswith('.json'):
tags.append('json')
if stripped_filename.endswith('.csv'):
tags.append('csv')
return tags
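# Illustrative behaviour of guess_tags, derived from the code above:
#   guess_tags('variants.vcf.gz')   ->  ['vcf']
#   guess_tags('activity.json')     ->  ['json']
#   guess_tags('readings.csv.bz2')  ->  ['csv']
#   guess_tags('notes.txt')         ->  []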
def characterize_local_files(filedir, max_bytes=MAX_FILE_DEFAULT):
"""
    Collate local file info as preparation for Open Humans upload.
Note: Files with filesize > max_bytes are not included in returned info.
"""
file_data = {}
logging.info('Characterizing files in {}'.format(filedir))
for filename in os.listdir(filedir):
filepath = os.path.join(filedir, filename)
file_stats = os.stat(filepath)
creation_date = arrow.get(file_stats.st_ctime).isoformat()
file_size = file_stats.st_size
if file_size <= max_bytes:
file_md5 = hashlib.md5()
with open(filepath, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
file_md5.update(chunk)
md5 = file_md5.hexdigest()
file_data[filename] = {
'tags': guess_tags(filename),
'description': '',
'md5': md5,
'creation_date': creation_date,
}
return file_data
def validate_metadata(target_dir, metadata):
"""
Check that the files listed in metadata exactly match files in target dir.
"""
file_list = os.listdir(target_dir)
for filename in file_list:
if filename not in metadata:
return False
for filename in metadata:
if filename not in file_list:
return False
return True
def load_metadata_csv_single_user(csv_in, header, tags_idx):
"""
Return the metadata as requested for a single user.
"""
metadata = {}
for row in csv_in:
        if row[0] == 'None' and all(x == 'NA' for x in row[1:]):
break
metadata[row[0]] = {
header[i]: row[i] for i in range(1, len(header)) if
i != tags_idx
}
metadata[row[0]]['tags'] = [t.strip() for t in
row[tags_idx].split(',') if
t.strip()]
return metadata
def load_metadata_csv_multi_user(csv_in, header, tags_idx):
"""
    Return the metadata as requested for multiple users, keyed by project member ID.
"""
metadata = {}
for row in csv_in:
if row[0] not in metadata:
metadata[row[0]] = {}
if row[1] == 'None' and all([x == 'NA' for x in row[2:]]):
continue
metadata[row[0]][row[1]] = {
header[i]: row[i] for i in range(2, len(header)) if
i != tags_idx
}
metadata[row[0]][row[1]]['tags'] = [t.strip() for t in
row[tags_idx].split(',') if
t.strip()]
return metadata
def load_metadata_csv(input_filepath):
"""
Return dict of metadata.
Format is either dict (filenames are keys) or dict-of-dicts (project member
IDs as top level keys, then filenames as keys).
"""
with open(input_filepath) as f:
csv_in = csv.reader(f)
header = next(csv_in)
try:
tags_idx = header.index('tags')
except ValueError:
tags_idx = None
if header[0] == 'project_member_id':
metadata = load_metadata_csv_multi_user(csv_in, header, tags_idx)
elif header[0] == 'filename':
metadata = load_metadata_csv_single_user(csv_in, header, tags_idx)
return metadata
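# The two accepted CSV layouts, sketched here with made-up values; only the
# header names and column order come from the code above and mk_metadata_csv
# below:
#
#   Single user (header starts with 'filename'):
#     filename,tags,description,md5,creation_date
#     data.json,json,My data,0123456789abcdef0123456789abcdef,2016-01-01T00:00:00
#
#   Multiple users (header starts with 'project_member_id'):
#     project_member_id,filename,tags,description,md5,creation_date
#     01234567,data.json,json,My data,0123456789abcdef0123456789abcdef,2016-01-01T00:00:00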
def mk_metadata_csv(filedir, outputfilepath, max_bytes=MAX_FILE_DEFAULT):
with open(outputfilepath, 'w') as outputfile:
csv_out = csv.writer(outputfile)
subdirs = [os.path.join(filedir, i) for i in os.listdir(filedir) if
os.path.isdir(os.path.join(filedir, i))]
if subdirs:
logging.info('Making metadata for subdirs of {}'.format(filedir))
if not all([re.match('^[0-9]{8}$', os.path.basename(d))
for d in subdirs]):
raise ValueError("Subdirs not all project member ID format!")
csv_out.writerow(['project_member_id', 'filename', 'tags',
'description', 'md5', 'creation_date'])
for subdir in subdirs:
file_info = characterize_local_files(
filedir=subdir, max_bytes=max_bytes)
proj_member_id = os.path.basename(subdir)
if not file_info:
csv_out.writerow([proj_member_id, 'None',
'NA', 'NA', 'NA', 'NA'])
continue
for filename in file_info:
csv_out.writerow([proj_member_id,
filename,
', '.join(file_info[filename]['tags']),
file_info[filename]['description'],
file_info[filename]['md5'],
file_info[filename]['creation_date'],
])
else:
csv_out.writerow(['filename', 'tags',
'description', 'md5', 'creation_date'])
file_info = characterize_local_files(
filedir=filedir, max_bytes=max_bytes)
for filename in file_info:
csv_out.writerow([filename,
', '.join(file_info[filename]['tags']),
file_info[filename]['description'],
file_info[filename]['md5'],
file_info[filename]['creation_date'],
])
def download_file(download_url, target_filepath, max_bytes=MAX_FILE_DEFAULT):
"""
Download a file.
"""
response = requests.get(download_url, stream=True)
size = int(response.headers['Content-Length'])
if size > max_bytes:
logging.info('Skipping {}, {} > {}'.format(
target_filepath, format_size(size), format_size(max_bytes)))
return
logging.info('Downloading {} ({})'.format(
target_filepath, format_size(size)))
if os.path.exists(target_filepath):
stat = os.stat(target_filepath)
if stat.st_size == size:
logging.info('Skipping, file exists and is the right '
'size: {}'.format(target_filepath))
return
else:
logging.info('Replacing, file exists and is the wrong '
'size: {}'.format(target_filepath))
os.remove(target_filepath)
with open(target_filepath, 'wb') as f:
for chunk in response.iter_content(chunk_size=8192):
if chunk:
f.write(chunk)
logging.info('Download complete: {}'.format(target_filepath))
def read_id_list(filepath):
if not filepath:
return None
id_list = []
with open(filepath) as f:
for line in f:
line = line.rstrip()
if not re.match('^[0-9]{8}$', line):
                raise ValueError(
                    'Each line in whitelist or blacklist is expected '
                    'to contain an eight digit ID, and nothing else.')
else:
id_list.append(line)
return id_list
| 34.829694
| 79
| 0.547768
|
6c98d555b3b627321b1cb428ef01752bcfb01e2a
| 8,282
|
py
|
Python
|
apps/paddlefold/alphafold_paddle/model/model.py
|
kanz76/PaddleHelix
|
f31c98db1f6d396f1ed1e51ae427028dcf3b0ae9
|
[
"Apache-2.0"
] | 1
|
2022-02-14T13:13:32.000Z
|
2022-02-14T13:13:32.000Z
|
apps/paddlefold/alphafold_paddle/model/model.py
|
chupvl/PaddleHelix
|
6e082f89b8090c3c360593d40a08bffc884165dd
|
[
"Apache-2.0"
] | null | null | null |
apps/paddlefold/alphafold_paddle/model/model.py
|
chupvl/PaddleHelix
|
6e082f89b8090c3c360593d40a08bffc884165dd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import io
import time
import pickle
import logging
import pathlib
import numpy as np
import ml_collections
from copy import deepcopy
from typing import Dict, Optional
import paddle
from alphafold_paddle.model import utils
from alphafold_paddle.relax import relax
from alphafold_paddle.model import modules
from alphafold_paddle.common import residue_constants
from alphafold_paddle.data.input import input_pipeline
logger = logging.getLogger(__name__)
TARGET_FEAT_DIM = 22
MSA_FEAT_DIM = 49
def print_shape(d, level=0):
tabs = '\t' * level
for k, v in d.items():
if type(v) is dict:
print(tabs + k)
print_shape(v, level=level+1)
else:
print(tabs + f'{k}: {v.shape} {v.dtype}')
def tensor_to_numpy(pred_dict):
for k in pred_dict.keys():
if isinstance(pred_dict[k], paddle.Tensor):
pred_dict[k] = pred_dict[k].numpy()
elif type(pred_dict[k]) is dict:
tensor_to_numpy(pred_dict[k])
def slice_pred_dict(pred_dict, slice_idx, ignores=['breaks']):
for k in pred_dict.keys():
if k in ignores:
continue
if type(pred_dict[k]) is dict:
pred_dict[k] = slice_pred_dict(pred_dict[k], slice_idx,
ignores=ignores)
else:
pred_dict[k] = pred_dict[k][slice_idx]
return pred_dict
class RunModel(object):
"""Wrapper for paddle model."""
def __init__(self,
name: str,
config: ml_collections.ConfigDict,
params_path: str,
dynamic_subbatch_size: bool = True):
self.name = name
self.config = config
self.dynamic_subbatch_size = dynamic_subbatch_size
channel_num = {
'target_feat': TARGET_FEAT_DIM,
'msa_feat': MSA_FEAT_DIM,
}
self.alphafold = modules.AlphaFold(channel_num, config.model)
self.init_params(str(params_path))
self.alphafold.eval()
def init_params(self, params_path: str):
if params_path.endswith('.npz'):
logger.info('Load as AlphaFold pre-trained model')
with open(params_path, 'rb') as f:
params = np.load(io.BytesIO(f.read()), allow_pickle=False)
params = dict(params)
pd_params = utils.jax_params_to_paddle(params)
pd_params = {k[len('alphafold.'):]: v for k, v in pd_params.items()}
elif params_path.endswith('.pd'):
logger.info('Load as Paddle model')
pd_params = paddle.load(params_path)
else:
raise ValueError('Unsupported params file type')
self.alphafold.set_state_dict(pd_params)
def preprocess(self,
raw_features: Dict[str, np.ndarray],
random_seed: int,
pkl: pathlib.Path = None) -> Dict[str, paddle.Tensor]:
"""Convert raw input features to model input features"""
if pkl is not None and pkl.exists():
logger.info(f'Use cached {pkl}')
with open(pkl, 'rb') as f:
features = pickle.load(f)
print_shape(features)
return utils.map_to_tensor(features, add_batch=True)
logger.info('Processing input features')
data_config = deepcopy(self.config.data)
feature_names = data_config.common.unsupervised_features
if data_config.common.use_templates:
feature_names += data_config.common.template_features
num_residues = int(raw_features['seq_length'][0])
data_config.eval.crop_size = num_residues
if 'deletion_matrix_int' in raw_features:
raw_features['deletion_matrix'] = (raw_features.pop(
'deletion_matrix_int').astype(np.float32))
array_dict = input_pipeline.np_to_array_dict(
np_example=raw_features, features=feature_names,
use_templates=data_config.common.use_templates)
features = input_pipeline.process_arrays_from_config(
array_dict, data_config)
features = {k: v for k, v in features.items() if v.dtype != 'O'}
extra_msa_length = data_config.common.max_extra_msa
for k in ['extra_msa', 'extra_has_deletion', 'extra_deletion_value',
'extra_msa_mask']:
features[k] = features[k][:, :extra_msa_length]
for k in features.keys():
if features[k].dtype == np.int64:
features[k] = features[k].astype(np.int32)
elif features[k].dtype == np.float64:
features[k] = features[k].astype(np.float32)
if pkl is not None:
with open(pkl, 'wb') as f:
pickle.dump(features, f, protocol=4)
print_shape(features)
return utils.map_to_tensor(features, add_batch=True)
def predict(self,
feat: Dict[str, paddle.Tensor],
ensemble_representations: bool = True,
return_representations: bool = True):
"""Predict protein structure and encoding representation"""
if self.dynamic_subbatch_size:
seq_len = feat['aatype'].shape[-1]
extra_msa_num = feat['extra_msa'].shape[-2]
self.update_subbatch_size(seq_len, extra_msa_num)
with paddle.no_grad():
ret = self.alphafold(
feat,
ensemble_representations=ensemble_representations,
return_representations=return_representations)
tensor_to_numpy(ret)
return ret
def postprocess(self,
aatype: np.ndarray,
residue_index: np.ndarray,
relaxer: relax.AmberRelaxation,
prediction: Dict[str, np.ndarray],
output_dir: pathlib.Path,
slice_idx: int = 0,
timings: Optional[Dict[str, float]] = None):
"""Compute pLDDT, save unrelaxed pdb and execute relaxation"""
single_pred = slice_pred_dict(prediction, slice_idx)
prediction.update(utils.get_confidence_metrics(single_pred))
plddt = prediction['plddt']
logger.info(f'{self.name} average pLDDT: {np.mean(plddt)}')
if 'max_predicted_aligned_error' in prediction:
err = prediction['max_predicted_aligned_error']
logger.info(f'{self.name} max predicted aligned error: {err}')
with open(output_dir.joinpath(f'result_{self.name}.pkl'), 'wb') as f:
pickle.dump(prediction, f, protocol=4)
plddt_b_factors = np.repeat(
plddt[:, None], residue_constants.atom_type_num, axis=-1)
prot = utils.generate_unrelaxed_pdb(
aatype, residue_index, single_pred,
output_dir.joinpath(f'unrelaxed_{self.name}.pdb'),
b_factors=plddt_b_factors)
t0 = time.time()
relaxed_pdb_str = relaxer.process(prot=prot)[0]
if timings is not None:
timings[f'relax_{self.name}'] = time.time() - t0
with open(output_dir.joinpath(f'relaxed_{self.name}.pdb'), 'w') as f:
f.write(relaxed_pdb_str)
return relaxed_pdb_str
def update_subbatch_size(self, seq_len, extra_msa_num):
if extra_msa_num == 5120:
if seq_len < 200:
# disable subbatch
self.alphafold.global_config.subbatch_size = 5120
elif extra_msa_num == 1024:
if seq_len < 600:
# disable subbatch
self.alphafold.global_config.subbatch_size = 1024
else:
raise ValueError('Unknown subbatch strategy')
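# A rough usage sketch of RunModel; the paths, config object and input arrays
# below are placeholders, not values shipped with this module:
#
#   runner = RunModel('model_1', config, 'params_model_1.npz')
#   feat = runner.preprocess(raw_features, random_seed=0,
#                            pkl=pathlib.Path('features.pkl'))
#   prediction = runner.predict(feat)
#   runner.postprocess(aatype, residue_index, relaxer, prediction,
#                      pathlib.Path('output'), timings={})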
| 35.393162
| 80
| 0.618933
|
2f5a80b60ebf4a90edaff3abbc55a92283da9ed7
| 1,556
|
py
|
Python
|
setup.py
|
jsandovalc/django-cities-light
|
a1c6af08938b7b01d4e12555bd4cb5040905603d
|
[
"MIT"
] | null | null | null |
setup.py
|
jsandovalc/django-cities-light
|
a1c6af08938b7b01d4e12555bd4cb5040905603d
|
[
"MIT"
] | null | null | null |
setup.py
|
jsandovalc/django-cities-light
|
a1c6af08938b7b01d4e12555bd4cb5040905603d
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
import shutil
import sys
import os
import os.path
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-cities-light',
version='3.7.0',
description='Simple alternative to django-cities',
author='James Pic,Dominick Rivard,Alexey Evseev',
author_email='jamespic@gmail.com, dominick.rivard@gmail.com, myhappydo@gmail.com',
url='https://github.com/yourlabs/django-cities-light',
packages=['cities_light'],
include_package_data=True,
zip_safe=False,
long_description=read('README.rst'),
license='MIT',
keywords='django cities countries postal codes',
install_requires=[
'pytz',
'unidecode>=0.04.13',
'django-autoslug>=1.9.8',
'progressbar2>=3.51.4'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| 32.416667
| 86
| 0.654242
|
0b36a23da3938dd6a58c332d22bc21433cd520a7
| 2,949
|
py
|
Python
|
hardware/max7219.py
|
gcurtis79/letsrobot
|
0cb5fae07392ee3661036d138d8986c9705bcf0c
|
[
"Apache-2.0"
] | 26
|
2018-09-27T17:27:30.000Z
|
2022-03-04T20:37:18.000Z
|
hardware/max7219.py
|
gcurtis79/letsrobot
|
0cb5fae07392ee3661036d138d8986c9705bcf0c
|
[
"Apache-2.0"
] | 30
|
2018-10-15T03:54:58.000Z
|
2020-05-28T06:57:08.000Z
|
hardware/max7219.py
|
gcurtis79/letsrobot
|
0cb5fae07392ee3661036d138d8986c9705bcf0c
|
[
"Apache-2.0"
] | 16
|
2018-10-04T03:16:43.000Z
|
2021-04-25T06:59:49.000Z
|
import spidev
columns = [0x1,0x2,0x3,0x4,0x5,0x6,0x7,0x8]
LEDOn = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]
LEDOff = [0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]
LEDEmoteSmile = [0x0,0x0,0x24,0x0,0x42,0x3C,0x0,0x0]
LEDEmoteSad = [0x0,0x0,0x24,0x0,0x0,0x3C,0x42,0x0]
LEDEmoteTongue = [0x0,0x0,0x24,0x0,0x42,0x3C,0xC,0x0]
LEDEmoteSurprise = [0x0,0x0,0x24,0x0,0x18,0x24,0x24,0x18]
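# Each pattern above holds eight bytes for the 8x8 matrix: one byte per
# MAX7219 digit register (a row or column, depending on wiring), one bit per
# LED, 1 = lit. For example 0x3C is 0b00111100, i.e. the middle four LEDs of
# a row, which forms the mouth in LEDEmoteSmile.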
spi = None
def setup(robot_config):
global LEDEmoteSmile
global LEDEmoteSad
global LEDEmoteTongue
    global LEDEmoteSurprise
global module
global spi
#LED controlling
spi = spidev.SpiDev()
spi.open(0,0)
#VCC -> RPi Pin 2
#GND -> RPi Pin 6
#DIN -> RPi Pin 19
#CLK -> RPi Pin 23
#CS -> RPi Pin 24
    # Decode-mode register: 0x00 = no BCD decoding (raw LED matrix data)
spi.writebytes([0x09])
spi.writebytes([0x00])
# Start with low brightness
spi.writebytes([0x0a])
spi.writebytes([0x03])
# scanlimit; 8 LEDs
spi.writebytes([0x0b])
spi.writebytes([0x07])
# Enter normal power-mode
spi.writebytes([0x0c])
spi.writebytes([0x01])
    # Display-test register: 0x00 = normal operation (test mode off)
spi.writebytes([0x0f])
spi.writebytes([0x00])
rotate = robot_config.getint('max7219', 'ledrotate')
if rotate == 180:
LEDEmoteSmile = LEDEmoteSmile[::-1]
LEDEmoteSad = LEDEmoteSad[::-1]
LEDEmoteTongue = LEDEmoteTongue[::-1]
LEDEmoteSurprise = LEDEmoteSurprise[::-1]
SetLED_Off()
def SetLED_On():
for i in range(len(columns)):
spi.xfer([columns[i],LEDOn[i]])
def SetLED_Off():
for i in range(len(columns)):
spi.xfer([columns[i],LEDOff[i]])
def SetLED_E_Smiley():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteSmile[i]])
def SetLED_E_Sad():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteSad[i]])
def SetLED_E_Tongue():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteTongue[i]])
def SetLED_E_Surprised():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteSurprise[i]])
def SetLED_Low():
# brightness MIN
spi.writebytes([0x0a])
spi.writebytes([0x00])
def SetLED_Med():
#brightness MED
spi.writebytes([0x0a])
spi.writebytes([0x06])
def SetLED_Full():
# brightness MAX
spi.writebytes([0x0a])
spi.writebytes([0x0F])
def move(args):
command = args['command']
if command == 'LED_OFF':
SetLED_Off()
if command == 'LED_FULL':
SetLED_On()
SetLED_Full()
if command == 'LED_MED':
SetLED_On()
SetLED_Med()
if command == 'LED_LOW':
SetLED_On()
SetLED_Low()
if command == 'LED_E_SMILEY':
SetLED_On()
SetLED_E_Smiley()
if command == 'LED_E_SAD':
SetLED_On()
SetLED_E_Sad()
if command == 'LED_E_TONGUE':
SetLED_On()
SetLED_E_Tongue()
if command == 'LED_E_SURPRISED':
SetLED_On()
        SetLED_E_Surprised()
| 25.205128
| 57
| 0.617158
|
c22ac0006075217af5838e8cf251c06a472a6201
| 725
|
py
|
Python
|
tests/test_html.py
|
joshuadavidthomas/django_coverage_plugin
|
c4ec0691906dc0923c494efc9c9236d3aa21be73
|
[
"Apache-2.0"
] | 172
|
2015-01-03T20:26:42.000Z
|
2022-02-18T20:38:59.000Z
|
tests/test_html.py
|
joshuadavidthomas/django_coverage_plugin
|
c4ec0691906dc0923c494efc9c9236d3aa21be73
|
[
"Apache-2.0"
] | 71
|
2015-01-17T19:22:53.000Z
|
2022-02-03T09:09:35.000Z
|
tests/test_html.py
|
BillSchumacher/django_coverage_plugin
|
597f07d60fffe05b9062b6c41afa722a4b32f3ba
|
[
"Apache-2.0"
] | 31
|
2015-01-18T14:32:46.000Z
|
2022-02-18T20:39:07.000Z
|
# coding: utf8
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/django_coverage_plugin/blob/master/NOTICE.txt
"""Tests of HTML reporting for django_coverage_plugin."""
import glob
from .plugin_test import DjangoPluginTestCase
class HtmlTest(DjangoPluginTestCase):
def test_simple(self):
self.make_template("""\
Simple © 2015
""")
self.run_django_coverage()
self.cov.html_report()
html_file = glob.glob("htmlcov/*_test_simple_html.html")[0]
with open(html_file) as fhtml:
html = fhtml.read()
self.assertIn('<span class="txt">Simple © 2015</span>', html)
| 29
| 86
| 0.670345
|
8067806a8d4ad5056d84ffb99227c093f713e824
| 394
|
py
|
Python
|
send.py
|
chhantyal/http_recv
|
fed31fe1a1f8f31d9e117fa00c9d303bfbcb752e
|
[
"BSD-3-Clause"
] | null | null | null |
send.py
|
chhantyal/http_recv
|
fed31fe1a1f8f31d9e117fa00c9d303bfbcb752e
|
[
"BSD-3-Clause"
] | null | null | null |
send.py
|
chhantyal/http_recv
|
fed31fe1a1f8f31d9e117fa00c9d303bfbcb752e
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from logging.handlers import HTTPHandler
logger = logging.getLogger(__name__)
server = 'https://log-recv.herokuapp.com:80'
http_handler = HTTPHandler(server, '/', method='POST')
logger.addHandler(http_handler)
logger.setLevel(logging.DEBUG)
if __name__ == "__main__":
logger.debug("Debug message logged to remote server")
print("Logs sent to {}:)".format(server))
| 21.888889
| 57
| 0.741117
|
c4c0de2f46112431a484ca1a04012527bb955b2e
| 20,072
|
py
|
Python
|
txweb2/http.py
|
grobza/ccs-calendarserver
|
d26ff131bc46057893dc3abbd3830a6dc0a73738
|
[
"Apache-2.0"
] | null | null | null |
txweb2/http.py
|
grobza/ccs-calendarserver
|
d26ff131bc46057893dc3abbd3830a6dc0a73738
|
[
"Apache-2.0"
] | null | null | null |
txweb2/http.py
|
grobza/ccs-calendarserver
|
d26ff131bc46057893dc3abbd3830a6dc0a73738
|
[
"Apache-2.0"
] | null | null | null |
# -*- test-case-name: txweb2.test.test_http -*-
##
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##
"""HyperText Transfer Protocol implementation.
The second coming.
Maintainer: James Y Knight
"""
# import traceback; log.info(''.join(traceback.format_stack()))
import json
import time
from twisted.internet import interfaces, error
from twisted.python import components
from twisted.web.template import Element, XMLString, renderer, flattenString
from zope.interface import implements
from twext.python.log import Logger
from txweb2 import responsecode
from txweb2 import http_headers
from txweb2 import iweb
from txweb2 import stream
from txweb2.stream import IByteStream, readAndDiscard
log = Logger()
defaultPortForScheme = {'http': 80, 'https': 443, 'ftp': 21}
def splitHostPort(scheme, hostport):
"""Split the host in "host:port" format into host and port fields.
If port was not specified, use the default for the given scheme, if
known. Returns a tuple of (hostname, portnumber)."""
# Split hostport into host and port
hostport = hostport.split(':', 1)
try:
if len(hostport) == 2:
return hostport[0], int(hostport[1])
except ValueError:
pass
return hostport[0], defaultPortForScheme.get(scheme, 0)
def parseVersion(strversion):
"""Parse version strings of the form Protocol '/' Major '.' Minor. E.g. 'HTTP/1.1'.
Returns (protocol, major, minor).
Will raise ValueError on bad syntax."""
proto, strversion = strversion.split('/')
major, minor = strversion.split('.')
major, minor = int(major), int(minor)
if major < 0 or minor < 0:
raise ValueError("negative number")
return (proto.lower(), major, minor)
class HTTPError(Exception):
def __init__(self, codeOrResponse):
"""An Exception for propagating HTTP Error Responses.
@param codeOrResponse: The numeric HTTP code or a complete http.Response
object.
@type codeOrResponse: C{int} or L{http.Response}
"""
self.response = iweb.IResponse(codeOrResponse)
Exception.__init__(self, str(self.response))
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.response)
class Response(object):
"""An object representing an HTTP Response to be sent to the client.
"""
implements(iweb.IResponse)
code = responsecode.OK
headers = None
stream = None
def __init__(self, code=None, headers=None, stream=None):
"""
@param code: The HTTP status code for this Response
@type code: C{int}
@param headers: Headers to be sent to the client.
@type headers: C{dict}, L{txweb2.http_headers.Headers}, or
C{None}
@param stream: Content body to send to the HTTP client
@type stream: L{txweb2.stream.IByteStream}
"""
if code is not None:
self.code = int(code)
if headers is not None:
if isinstance(headers, dict):
headers = http_headers.Headers(headers)
self.headers = headers
else:
self.headers = http_headers.Headers()
if stream is not None:
self.stream = IByteStream(stream)
def __repr__(self):
if self.stream is None:
streamlen = None
else:
streamlen = self.stream.length
return "<%s.%s code=%d, streamlen=%s>" % (self.__module__, self.__class__.__name__, self.code, streamlen)
class StatusResponseElement(Element):
"""
Render the HTML for a L{StatusResponse}
"""
loader = XMLString("""<html
xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1"
t:render="response"><head><title><t:slot name="title"
/></title></head><body><h1><t:slot name="title"
/></h1><p><t:slot name="description"
/></p></body></html>""")
def __init__(self, title, description):
super(StatusResponseElement, self).__init__()
self.title = title
self.description = description
@renderer
def response(self, request, tag):
"""
Top-level renderer.
"""
return tag.fillSlots(title=self.title, description=self.description)
class StatusResponse (Response):
"""
A L{Response} object which simply contains a status code and a description
of what happened.
"""
def __init__(self, code, description, title=None):
"""
@param code: a response code in L{responsecode.RESPONSES}.
@param description: a string description.
@param title: the message title. If not specified or C{None}, defaults
to C{responsecode.RESPONSES[code]}.
"""
if title is None:
title = responsecode.RESPONSES[code]
element = StatusResponseElement(title, description)
out = []
flattenString(None, element).addCallback(out.append)
mime_params = {"charset": "utf-8"}
super(StatusResponse, self).__init__(code=code, stream=out[0])
self.headers.setHeader(
"content-type", http_headers.MimeType("text", "html", mime_params)
)
self.description = description
def __repr__(self):
return "<%s %s %s>" % (self.__class__.__name__, self.code, self.description)
class RedirectResponse (StatusResponse):
"""
A L{Response} object that contains a redirect to another network location.
"""
def __init__(self, location, temporary=False):
"""
@param location: the URI to redirect to.
@param temporary: whether it's a temporary redirect or permanent
"""
code = responsecode.TEMPORARY_REDIRECT if temporary else responsecode.MOVED_PERMANENTLY
super(RedirectResponse, self).__init__(
code,
"Document moved to %s." % (location,)
)
self.headers.setHeader("location", location)
def NotModifiedResponse(oldResponse=None):
if oldResponse is not None:
headers = http_headers.Headers()
for header in (
# Required from sec 10.3.5:
'date', 'etag', 'content-location', 'expires',
'cache-control', 'vary',
# Others:
'server', 'proxy-authenticate', 'www-authenticate', 'warning'
):
value = oldResponse.headers.getRawHeaders(header)
if value is not None:
headers.setRawHeaders(header, value)
else:
headers = None
return Response(code=responsecode.NOT_MODIFIED, headers=headers)
def checkPreconditions(request, response=None, entityExists=True, etag=None, lastModified=None):
"""Check to see if this request passes the conditional checks specified
by the client. May raise an HTTPError with result codes L{NOT_MODIFIED}
or L{PRECONDITION_FAILED}, as appropriate.
This function is called automatically as an output filter for GET and
HEAD requests. With GET/HEAD, it is not important for the precondition
check to occur before doing the action, as the method is non-destructive.
However, if you are implementing other request methods, like PUT
for your resource, you will need to call this after determining
the etag and last-modified time of the existing resource but
    before actually doing the requested action. In that case, pass the
    etag and lastModified arguments directly rather than a response.
    This function examines the appropriate request headers for conditionals
    (If-Modified-Since, If-Unmodified-Since, If-Match, If-None-Match,
    or If-Range), compares them with the supplied etag and last-modified
    time, and then sets the response code as necessary.
@param response: This should be provided for GET/HEAD methods. If
it is specified, the etag and lastModified arguments will
be retrieved automatically from the response headers and
shouldn't be separately specified. Not providing the
response with a GET request may cause the emitted
"Not Modified" responses to be non-conformant.
@param entityExists: Set to False if the entity in question doesn't
yet exist. Necessary for PUT support with 'If-None-Match: *'.
@param etag: The etag of the resource to check against, or None.
@param lastModified: The last modified date of the resource to check
against, or None.
@raise: HTTPError: Raised when the preconditions fail, in order to
abort processing and emit an error page.
"""
if response:
assert etag is None and lastModified is None
# if the code is some sort of error code, don't do anything
if not ((response.code >= 200 and response.code <= 299) or
response.code == responsecode.PRECONDITION_FAILED):
return False
etag = response.headers.getHeader("etag")
lastModified = response.headers.getHeader("last-modified")
def matchETag(tags, allowWeak):
if entityExists and '*' in tags:
return True
if etag is None:
return False
return ((allowWeak or not etag.weak) and
([etagmatch for etagmatch in tags if etag.match(etagmatch, strongCompare=not allowWeak)]))
# First check if-match/if-unmodified-since
# If either one fails, we return PRECONDITION_FAILED
match = request.headers.getHeader("if-match")
if match:
if not matchETag(match, False):
raise HTTPError(StatusResponse(responsecode.PRECONDITION_FAILED, "Requested resource does not have a matching ETag."))
unmod_since = request.headers.getHeader("if-unmodified-since")
if unmod_since:
if not lastModified or lastModified > unmod_since:
raise HTTPError(StatusResponse(responsecode.PRECONDITION_FAILED, "Requested resource has changed."))
# Now check if-none-match/if-modified-since.
# This bit is tricky, because of the requirements when both IMS and INM
# are present. In that case, you can't return a failure code
# unless *both* checks think it failed.
# Also, if the INM check succeeds, ignore IMS, because INM is treated
# as more reliable.
# I hope I got the logic right here...the RFC is quite poorly written
# in this area. Someone might want to verify the testcase against
# RFC wording.
# If IMS header is later than current time, ignore it.
notModified = None
ims = request.headers.getHeader('if-modified-since')
if ims:
notModified = (ims < time.time() and lastModified and lastModified <= ims)
inm = request.headers.getHeader("if-none-match")
if inm:
if request.method in ("HEAD", "GET"):
# If it's a range request, don't allow a weak ETag, as that
# would break.
canBeWeak = not request.headers.hasHeader('Range')
if notModified != False and matchETag(inm, canBeWeak):
raise HTTPError(NotModifiedResponse(response))
else:
if notModified != False and matchETag(inm, False):
raise HTTPError(StatusResponse(responsecode.PRECONDITION_FAILED, "Requested resource has a matching ETag."))
else:
if notModified == True:
raise HTTPError(NotModifiedResponse(response))
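# A rough usage sketch for a destructive method such as PUT, following the
# docstring above (resource_exists, current_etag and current_mtime are
# placeholders for values the resource implementation would supply):
#
#   def http_PUT(self, request):
#       checkPreconditions(request, entityExists=resource_exists,
#                          etag=current_etag, lastModified=current_mtime)
#       # ... perform the store only if no HTTPError was raised ...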
def checkIfRange(request, response):
"""Checks for the If-Range header, and if it exists, checks if the
test passes. Returns true if the server should return partial data."""
ifrange = request.headers.getHeader("if-range")
if ifrange is None:
return True
if isinstance(ifrange, http_headers.ETag):
return ifrange.match(response.headers.getHeader("etag"), strongCompare=True)
else:
return ifrange == response.headers.getHeader("last-modified")
class _NotifyingProducerStream(stream.ProducerStream):
doStartReading = None
def __init__(self, length=None, doStartReading=None):
stream.ProducerStream.__init__(self, length=length)
self.doStartReading = doStartReading
def read(self):
if self.doStartReading is not None:
doStartReading = self.doStartReading
self.doStartReading = None
doStartReading()
return stream.ProducerStream.read(self)
def write(self, data):
self.doStartReading = None
stream.ProducerStream.write(self, data)
def finish(self):
self.doStartReading = None
stream.ProducerStream.finish(self)
# response codes that must have empty bodies
NO_BODY_CODES = (responsecode.NO_CONTENT, responsecode.NOT_MODIFIED)
class Request(object):
"""A HTTP request.
Subclasses should override the process() method to determine how
the request will be processed.
@ivar method: The HTTP method that was used.
@ivar uri: The full URI that was requested (includes arguments).
@ivar headers: All received headers
@ivar clientproto: client HTTP version
@ivar stream: incoming data stream.
"""
implements(iweb.IRequest, interfaces.IConsumer)
known_expects = ('100-continue',)
def __init__(self, chanRequest, command, path, version, contentLength, headers):
"""
@param chanRequest: the channel request we're associated with.
"""
self.chanRequest = chanRequest
self.method = command
self.uri = path
self.clientproto = version
self.headers = headers
if '100-continue' in self.headers.getHeader('expect', ()):
doStartReading = self._sendContinue
else:
doStartReading = None
self.stream = _NotifyingProducerStream(contentLength, doStartReading)
self.stream.registerProducer(self.chanRequest, True)
def checkExpect(self):
"""Ensure there are no expectations that cannot be met.
Checks Expect header against self.known_expects."""
expects = self.headers.getHeader('expect', ())
for expect in expects:
if expect not in self.known_expects:
raise HTTPError(responsecode.EXPECTATION_FAILED)
def process(self):
"""Called by channel to let you process the request.
Can be overridden by a subclass to do something useful."""
pass
def handleContentChunk(self, data):
"""Callback from channel when a piece of data has been received.
Puts the data in .stream"""
self.stream.write(data)
def handleContentComplete(self):
"""Callback from channel when all data has been received. """
self.stream.unregisterProducer()
self.stream.finish()
def connectionLost(self, reason):
"""connection was lost"""
pass
def __repr__(self):
return '<%s %s %s>' % (self.method, self.uri, self.clientproto)
def _sendContinue(self):
self.chanRequest.writeIntermediateResponse(responsecode.CONTINUE)
def _reallyFinished(self, x):
"""We are finished writing data."""
self.chanRequest.finish()
def _finished(self, x):
"""
We are finished writing data.
But we need to check that we have also finished reading all data as we
might have sent a, for example, 401 response before we read any data.
To make sure that the stream/producer sequencing works properly we need
to discard the remaining data in the request.
"""
if self.stream.length != 0:
return readAndDiscard(self.stream).addCallback(self._reallyFinished).addErrback(self._error)
else:
self._reallyFinished(x)
def _error(self, reason):
if reason.check(error.ConnectionLost):
log.info("Request error: {msg}", msg=reason.getErrorMessage())
else:
log.failure("Request error", reason)
# Only bother with cleanup on errors other than lost connection.
self.chanRequest.abortConnection()
def writeResponse(self, response):
"""
Write a response.
"""
if self.stream.doStartReading is not None:
# Expect: 100-continue was requested, but 100 response has not been
# sent, and there's a possibility that data is still waiting to be
# sent.
#
# Ideally this means the remote side will not send any data.
# However, because of compatibility requirements, it might timeout,
# and decide to do so anyways at the same time we're sending back
# this response. Thus, the read state is unknown after this.
# We must close the connection.
self.chanRequest.channel.setReadPersistent(False)
# Nothing more will be read
self.chanRequest.allContentReceived()
if response.code != responsecode.NOT_MODIFIED:
# Not modified response is *special* and doesn't get a content-length.
if response.stream is None:
response.headers.setHeader('content-length', 0)
elif response.stream.length is not None:
response.headers.setHeader('content-length', response.stream.length)
self.chanRequest.writeHeaders(response.code, response.headers)
# if this is a "HEAD" request, or a special response code,
# don't return any data.
if self.method == "HEAD" or response.code in NO_BODY_CODES:
if response.stream is not None:
response.stream.close()
self._finished(None)
return
log.info("Response:\n{msg}", msg=response)
d = stream.StreamProducer(response.stream).beginProducing(self.chanRequest)
d.addCallback(self._finished).addErrback(self._error)
class XMLResponse (Response):
"""
XML L{Response} object.
Renders itself as an XML document.
"""
def __init__(self, code, element):
"""
@param xml_responses: an iterable of davxml.Response objects.
"""
Response.__init__(self, code, stream=element.toxml())
self.headers.setHeader("content-type", http_headers.MimeType("text", "xml"))
class JSONResponse (Response):
"""
JSON L{Response} object.
Renders itself as an JSON document.
"""
def __init__(self, code, jobj, contentType="application/json", pretty=False):
"""
@param jobj: a Python object that can be serialized to JSON.
"""
kwargs = {}
if pretty:
kwargs["indent"] = 2
kwargs["separators"] = (',', ':')
Response.__init__(self, code, stream=json.dumps(jobj, **kwargs))
self.headers.setHeader("content-type", http_headers.MimeType(*contentType.split("/")))
components.registerAdapter(Response, int, iweb.IResponse)
__all__ = ['HTTPError', 'NotModifiedResponse', 'Request', 'Response', 'StatusResponse', 'RedirectResponse', 'checkIfRange', 'checkPreconditions', 'defaultPortForScheme', 'parseVersion', 'splitHostPort', "XMLResponse", "JSONResponse"]
| 36.56102
| 233
| 0.658679
|
81f3b46c52400f8adb6305d405b548f8b6820602
| 84
|
py
|
Python
|
tests/snippets/weakrefs.py
|
EdwardSam4/RustPython
|
7ec6a7ffd1afbfe71322cbace161483b6877d4a5
|
[
"MIT"
] | 1
|
2021-03-08T02:19:16.000Z
|
2021-03-08T02:19:16.000Z
|
tests/snippets/weakrefs.py
|
EdwardSam4/RustPython
|
7ec6a7ffd1afbfe71322cbace161483b6877d4a5
|
[
"MIT"
] | null | null | null |
tests/snippets/weakrefs.py
|
EdwardSam4/RustPython
|
7ec6a7ffd1afbfe71322cbace161483b6877d4a5
|
[
"MIT"
] | 1
|
2020-12-04T23:52:08.000Z
|
2020-12-04T23:52:08.000Z
|
from _weakref import ref
class X:
pass
a = X()
b = ref(a)
assert b() is a
| 6.461538
| 24
| 0.571429
|
12f36671506cd9f5fdbc26026fd5da48899163e0
| 216
|
py
|
Python
|
get_last_handle.py
|
sara-nl/surfsara-handle-client-cpp
|
511441fa42b6924dd8c1e0c287ac6adc167621be
|
[
"Apache-2.0"
] | null | null | null |
get_last_handle.py
|
sara-nl/surfsara-handle-client-cpp
|
511441fa42b6924dd8c1e0c287ac6adc167621be
|
[
"Apache-2.0"
] | 2
|
2019-02-23T21:07:37.000Z
|
2019-02-23T21:29:50.000Z
|
get_last_handle.py
|
sara-nl/surfsara-handle-client-cpp
|
511441fa42b6924dd8c1e0c287ac6adc167621be
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os.path
import json
if os.path.isfile('last_handle.json'):
with open('last_handle.json') as f:
data = json.load(f)
print(str(data.get('handle')))
else:
print("")
| 19.636364
| 39
| 0.625
|
25e198105d8453ed29889266e4213eda12ddd209
| 4,606
|
py
|
Python
|
prediction_server/prediction_server_pb2_grpc.py
|
TheFebrin/thesis-normals-estimation
|
43c2b9f902b93ec8eace610bb386d190a58eb4e3
|
[
"MIT"
] | null | null | null |
prediction_server/prediction_server_pb2_grpc.py
|
TheFebrin/thesis-normals-estimation
|
43c2b9f902b93ec8eace610bb386d190a58eb4e3
|
[
"MIT"
] | null | null | null |
prediction_server/prediction_server_pb2_grpc.py
|
TheFebrin/thesis-normals-estimation
|
43c2b9f902b93ec8eace610bb386d190a58eb4e3
|
[
"MIT"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import prediction_server_pb2 as prediction__server__pb2
class PredictionServerStub(object):
"""option java_multiple_files = true;
option java_package = "io.grpc.examples.helloworld";
option java_outer_classname = "HelloWorldProto";
option objc_class_prefix = "HLW";
package helloworld;
The greeting service definition.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ping = channel.unary_unary(
'/PredictionServer/ping',
request_serializer=prediction__server__pb2.PingRequest.SerializeToString,
response_deserializer=prediction__server__pb2.PingResponse.FromString,
)
self.predict = channel.unary_unary(
'/PredictionServer/predict',
request_serializer=prediction__server__pb2.PredictionRequest.SerializeToString,
response_deserializer=prediction__server__pb2.PredictionResponse.FromString,
)
class PredictionServerServicer(object):
"""option java_multiple_files = true;
option java_package = "io.grpc.examples.helloworld";
option java_outer_classname = "HelloWorldProto";
option objc_class_prefix = "HLW";
package helloworld;
The greeting service definition.
"""
def ping(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def predict(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PredictionServerServicer_to_server(servicer, server):
rpc_method_handlers = {
'ping': grpc.unary_unary_rpc_method_handler(
servicer.ping,
request_deserializer=prediction__server__pb2.PingRequest.FromString,
response_serializer=prediction__server__pb2.PingResponse.SerializeToString,
),
'predict': grpc.unary_unary_rpc_method_handler(
servicer.predict,
request_deserializer=prediction__server__pb2.PredictionRequest.FromString,
response_serializer=prediction__server__pb2.PredictionResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'PredictionServer', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class PredictionServer(object):
"""option java_multiple_files = true;
option java_package = "io.grpc.examples.helloworld";
option java_outer_classname = "HelloWorldProto";
option objc_class_prefix = "HLW";
package helloworld;
The greeting service definition.
"""
@staticmethod
def ping(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/PredictionServer/ping',
prediction__server__pb2.PingRequest.SerializeToString,
prediction__server__pb2.PingResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def predict(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/PredictionServer/predict',
prediction__server__pb2.PredictionRequest.SerializeToString,
prediction__server__pb2.PredictionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 37.145161
| 101
| 0.673469
|
d874732d6fa0068f99de1babbacae1e129a6c2cb
| 14,127
|
py
|
Python
|
python/tvm/relay/backend/compile_engine.py
|
maxtnuk/incubator-tvm
|
050a836b18c419213f34b8ac76afced425d9d70e
|
[
"Apache-2.0"
] | 2
|
2019-11-13T01:17:41.000Z
|
2020-05-15T19:06:52.000Z
|
python/tvm/relay/backend/compile_engine.py
|
maxtnuk/incubator-tvm
|
050a836b18c419213f34b8ac76afced425d9d70e
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relay/backend/compile_engine.py
|
maxtnuk/incubator-tvm
|
050a836b18c419213f34b8ac76afced425d9d70e
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=len-as-condition,no-else-return,invalid-name
"""Backend code generation engine."""
from __future__ import absolute_import
import logging
import numpy as np
import tvm
from tvm import te, autotvm, auto_scheduler
from tvm.runtime import Object
from tvm.support import libinfo
from tvm.target import Target
from .. import function as _function
from .. import ty as _ty
from . import _backend
logger = logging.getLogger("compile_engine")
autotvm_logger = logging.getLogger("autotvm")
@tvm._ffi.register_object("relay.LoweredOutput")
class LoweredOutput(Object):
"""Lowered output"""
def __init__(self, outputs, implement):
self.__init_handle_by_constructor__(_backend._make_LoweredOutput, outputs, implement)
@tvm._ffi.register_object("relay.CCacheKey")
class CCacheKey(Object):
"""Key in the CompileEngine.
Parameters
----------
source_func : tvm.relay.Function
The source function.
target : tvm.Target
The target we want to run the function on.
"""
def __init__(self, source_func, target):
self.__init_handle_by_constructor__(_backend._make_CCacheKey, source_func, target)
@tvm._ffi.register_object("relay.CCacheValue")
class CCacheValue(Object):
"""Value in the CompileEngine, including usage statistics."""
def _get_cache_key(source_func, target):
if isinstance(source_func, _function.Function):
if isinstance(target, str):
target = Target(target)
if not target:
raise ValueError("Need target when source_func is a Function")
return CCacheKey(source_func, target)
if not isinstance(source_func, CCacheKey):
raise TypeError("Expect source_func to be CCacheKey")
return source_func
def get_shape(shape):
"""Convert the shape to correct dtype and vars."""
ret = []
for dim in shape:
if isinstance(dim, tvm.tir.IntImm):
if libinfo()["INDEX_DEFAULT_I64"] == "ON":
ret.append(dim)
else:
val = int(dim)
assert val <= np.iinfo(np.int32).max
ret.append(tvm.tir.IntImm("int32", val))
elif isinstance(dim, tvm.tir.Any):
ret.append(te.var("any_dim", "int32"))
else:
ret.append(dim)
return ret
def get_valid_implementations(op, attrs, inputs, out_type, target):
"""Get all valid implementations from the op strategy.
Note that this function doesn't support op with symbolic input shapes.
Parameters
----------
op : tvm.ir.Op
Relay operator.
attrs : object
The op attribute.
inputs : List[tvm.te.Tensor]
Input tensors to the op.
out_type : relay.Type
The output type.
target : tvm.target.Target
The target to compile the op.
Returns
-------
ret : List[relay.op.OpImplementation]
The list of all valid op implementations.
"""
fstrategy = op.get_attr("FTVMStrategy")
assert fstrategy is not None, "%s doesn't have FTVMStrategy registered" % op.name
with target:
strategy = fstrategy(attrs, inputs, out_type, target)
analyzer = tvm.arith.Analyzer()
ret = []
for spec in strategy.specializations:
if spec.condition:
# check if all the clauses in the specialized condition are true
flag = True
for clause in spec.condition.clauses:
clause = analyzer.canonical_simplify(clause)
if isinstance(clause, tvm.tir.IntImm) and clause.value:
continue
flag = False
break
if flag:
for impl in spec.implementations:
ret.append(impl)
else:
for impl in spec.implementations:
ret.append(impl)
return ret
def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True):
"""Select the best implementation from the op strategy.
If use_autotvm is True, it'll first try to find the best implementation
based on AutoTVM profile results. If no AutoTVM profile result is found,
it'll choose the implementation with highest plevel.
If use_autotvm is False, it'll directly choose the implementation with
highest plevel.
Note that this function doesn't support op with symbolic input shapes.
Parameters
----------
op : tvm.ir.Op
Relay operator.
attrs : object
The op attribute.
inputs : List[tvm.te.Tensor]
Input tensors to the op.
out_type : relay.Type
The output type.
target : tvm.target.Target
The target to compile the op.
use_autotvm : bool
Whether query AutoTVM to pick the best.
Returns
-------
ret : tuple(relay.op.OpImplementation, List[tvm.te.Tensor])
The best op implementation and the corresponding output tensors.
"""
all_impls = get_valid_implementations(op, attrs, inputs, out_type, target)
best_plevel_impl = max(all_impls, key=lambda x: x.plevel)
# If not use autotvm, always return the implementation with the highest priority
if not use_autotvm:
logger.info(
"Using %s for %s based on highest priority (%d)",
best_plevel_impl.name,
op.name,
best_plevel_impl.plevel,
)
outs = best_plevel_impl.compute(attrs, inputs, out_type)
return best_plevel_impl, outs
# If auto-scheduler is enabled for Relay, always prefer auto-scheduler
if auto_scheduler.is_relay_integration_enabled():
auto_scheduler_impls = []
for impl in all_impls:
if impl.name.endswith(auto_scheduler.relay_integration.auto_schedule_impl_suffix):
auto_scheduler_impls.append(impl)
if auto_scheduler_impls:
assert len(auto_scheduler_impls) == 1
impl = auto_scheduler_impls[0]
outs = impl.compute(attrs, inputs, out_type)
return impl, outs
# Otherwise, try autotvm templates
outputs = {}
workloads = {}
best_autotvm_impl = None
best_cfg = None
dispatch_ctx = autotvm.task.DispatchContext.current
autotvm.GLOBAL_SCOPE.silent = True
for impl in all_impls:
outs = impl.compute(attrs, inputs, out_type)
outputs[impl] = outs
workload = autotvm.task.get_workload(outs)
workloads[impl] = workload
if workload is None:
# Not an AutoTVM tunable implementation
continue
cfg = dispatch_ctx.query(target, workload)
if cfg.is_fallback:
# Skip fallback config
continue
logger.info("Implementation %s for %s has cost %.2e", impl.name, op.name, cfg.cost)
if best_cfg is None or best_cfg.cost > cfg.cost:
best_autotvm_impl = impl
best_cfg = cfg
autotvm.GLOBAL_SCOPE.silent = False
if best_autotvm_impl:
# The best autotvm implementation definitely doesn't use fallback config
logger.info(
"Using %s for %s based on lowest cost (%.2e)",
best_autotvm_impl.name,
op.name,
best_cfg.cost,
)
return best_autotvm_impl, outputs[best_autotvm_impl]
# Use the implementation with highest plevel
if workloads[best_plevel_impl] is not None:
msg = (
"Cannot find config for target=%s, workload=%s. A fallback configuration "
"is used, which may bring great performance regression."
% (target, workloads[best_plevel_impl])
)
if msg not in autotvm.task.DispatchContext.warning_messages:
autotvm.task.DispatchContext.warning_messages.add(msg)
autotvm_logger.warning(msg)
logger.info(
"Using %s for %s based on highest priority (%s)",
best_plevel_impl.name,
op.name,
best_plevel_impl.plevel,
)
return best_plevel_impl, outputs[best_plevel_impl]
@tvm._ffi.register_func("relay.backend.lower_call")
def lower_call(call, inputs, target):
"""Lower the call expression to op implementation and tensor outputs."""
assert isinstance(call.op, tvm.ir.Op)
op = call.op
# Prepare the call_node->checked_type(). For the call node inputs, we ensure that
    # the shape is Int32. The following code ensures the same for the output as well.
# TODO(@icemelon9): Support recursive tuple
ret_type = call.checked_type
if isinstance(ret_type, _ty.TensorType):
ret_type = _ty.TensorType(get_shape(ret_type.shape), ret_type.dtype)
elif isinstance(ret_type, _ty.TupleType):
new_fields = []
for field in ret_type.fields:
if isinstance(field, _ty.TensorType):
new_fields.append(_ty.TensorType(get_shape(field.shape), field.dtype))
else:
new_fields.append(field)
ret_type = _ty.TupleType(new_fields)
is_dyn = _ty.is_dynamic(call.checked_type)
for arg in call.args:
is_dyn = is_dyn or _ty.is_dynamic(arg.checked_type)
# check if in the AutoTVM tracing mode, and disable if op is not in wanted list
env = autotvm.task.TaskExtractEnv.current
reenable_tracing = False
if env is not None and env.tracing:
if env.wanted_relay_ops is not None and op not in env.wanted_relay_ops:
env.tracing = False
reenable_tracing = True
if not is_dyn:
best_impl, outputs = select_implementation(op, call.attrs, inputs, ret_type, target)
else:
# TODO(@icemelon9): Allow tvm to generate multiple kernels for dynamic shapes.
# Currently, we just use the implementation with highest plevel
best_impl, outputs = select_implementation(
op, call.attrs, inputs, ret_type, target, use_autotvm=False
)
# re-enable AutoTVM tracing
if reenable_tracing:
env.tracing = True
return LoweredOutput(outputs, best_impl)
@tvm._ffi.register_object("relay.CompileEngine")
class CompileEngine(Object):
"""CompileEngine to get lowered code."""
def __init__(self):
raise RuntimeError("Cannot construct a CompileEngine")
def lower(self, source_func, target=None):
"""Lower a source_func to a CachedFunc.
Parameters
----------
source_func : Union[tvm.relay.Function, CCacheKey]
The source relay function.
target : tvm.Target
The target platform.
Returns
-------
cached_func: CachedFunc
The result of lowering.
"""
# pylint: disable=broad-except, import-outside-toplevel
try:
key = _get_cache_key(source_func, target)
return _backend._CompileEngineLower(self, key)
except Exception:
import traceback
msg = traceback.format_exc()
msg += "Error during compile func\n"
msg += "--------------------------\n"
msg += source_func.astext(show_meta_data=False)
msg += "--------------------------\n"
raise RuntimeError(msg)
def lower_shape_func(self, source_func, target=None):
key = _get_cache_key(source_func, target)
return _backend._CompileEngineLowerShapeFunc(self, key)
def jit(self, source_func, target=None):
"""JIT a source_func to a tvm.runtime.PackedFunc.
Parameters
----------
source_func : Union[tvm.relay.Function, CCacheKey]
The source relay function.
target : tvm.Target
The target platform.
Returns
-------
jited_func: tvm.runtime.PackedFunc
            The result of the JIT-compiled function.
"""
key = _get_cache_key(source_func, target)
return _backend._CompileEngineJIT(self, key)
def clear(self):
"""clear the existing cached functions"""
_backend._CompileEngineClear(self)
def items(self):
"""List items in the cache.
Returns
-------
item_list : List[Tuple[CCacheKey, CCacheValue]]
The list of items.
"""
res = _backend._CompileEngineListItems(self)
assert len(res) % 2 == 0
return [(res[2 * i], res[2 * i + 1]) for i in range(len(res) // 2)]
def get_current_ccache_key(self):
return _backend._CompileEngineGetCurrentCCacheKey(self)
def dump(self):
"""Return a string representation of engine dump.
Returns
-------
dump : str
The dumped string representation
"""
items = self.items()
res = "====================================\n"
res += "CompilerEngine dump, %d items cached\n" % len(items)
for k, v in items:
res += "------------------------------------\n"
res += "target={}\n".format(k.target)
res += "use_count={}\n".format(v.use_count)
res += "func_name={}\n".format(v.cached_func.func_name)
res += k.source_func.astext() + "\n"
res += "===================================\n"
return res
def get():
"""Get the global compile engine.
Returns
-------
engine : tvm.relay.backend.CompileEngine
The compile engine.
"""
return _backend._CompileEngineGlobal()
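# Illustrative sketch: one plausible way to inspect the global compile engine's
# cache after a build has populated it. The helper name below is an assumption
# for demonstration only and is not part of the established API.
def _demo_dump_compile_engine():
    engine = get()
    print("cached items:", len(engine.items()))
    print(engine.dump())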
| 33.397163
| 94
| 0.632264
|
d377504b22923445bde9f82d3bdb735f7d6df86e
| 2,218
|
py
|
Python
|
kibra/tlv.py
|
KiraleTech/KiBRA
|
08bcba625edbb491d4b325e138c69d45b17aca02
|
[
"MIT"
] | 11
|
2018-03-06T17:20:41.000Z
|
2021-05-16T16:57:53.000Z
|
kibra/tlv.py
|
KiraleTech/KiBRA
|
08bcba625edbb491d4b325e138c69d45b17aca02
|
[
"MIT"
] | 7
|
2019-06-28T02:27:47.000Z
|
2020-04-29T16:13:16.000Z
|
kibra/tlv.py
|
KiraleTech/KiBRA
|
08bcba625edbb491d4b325e138c69d45b17aca02
|
[
"MIT"
] | 3
|
2018-12-27T09:46:18.000Z
|
2020-09-28T02:48:41.000Z
|
class ThreadTLV:
'''Thread TLV representation'''
def __init__(self, data=None, t=None, l=None, v=None):
if isinstance(data, str):
self.data = bytearray.fromhex(data)
elif isinstance(data, bytes):
self.data = bytearray(data)
elif isinstance(data, bytearray):
self.data = data
elif data is None and isinstance(t, int) and isinstance(l, int):
self.data = bytearray()
self.data.append(t)
self.data.append(l)
if l > 0:
self.data.extend(bytearray(v))
else:
raise Exception('Bad data.')
self.type = int(self.data[0])
# TODO: extended length
self.length = int(self.data[1])
self.value = self.data[2:]
def __str__(self):
result = '%3u | %3u |' % (self.type, self.length)
if self.length != 0:
for byte in self.value:
result += ' %02x' % byte
return result
def array(self):
'''TLV data as bytearray'''
return self.data
@staticmethod
def sub_tlvs(data=None):
'''Generate ThreadTLV objects with the contents of the current TLV'''
tlvs = []
if not data:
return tlvs
elif isinstance(data, str):
data = bytearray.fromhex(data)
elif isinstance(data, bytes):
data = bytearray(data)
elif isinstance(data, bytearray):
pass
else:
raise Exception('Bad data.')
while len(data) > 1:
size = int(data[1]) + 2
tlvs.append(ThreadTLV(data[:size]))
data = data[size:]
return tlvs
@staticmethod
def sub_tlvs_str(payload):
sub_tlvs = ThreadTLV.sub_tlvs(payload)
result = ''
for tlv in sub_tlvs:
result += '{ %s } ' % tlv
return result
@staticmethod
def get_value(data, type_):
'''Return the array value of the TLV of type type_ from data'''
for tlv in ThreadTLV.sub_tlvs(data):
if tlv.type == type_:
# TODO: check size depending on the type
return tlv.value
return None
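# Illustrative sketch: a minimal round-trip through ThreadTLV. The TLV bytes and
# hex strings below are made-up demonstration values, not Thread spec examples.
if __name__ == '__main__':
    tlv = ThreadTLV(t=1, l=2, v=b'\xab\xcd')
    print(tlv)                                    # "  1 |   2 | ab cd"
    print(ThreadTLV.sub_tlvs_str('0101ff0200'))   # two concatenated TLVs
    print(ThreadTLV.get_value('0101ff0200', 1))   # bytearray(b'\xff')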
| 30.383562
| 77
| 0.533814
|
15920af2b04f06447b8b6fd02959a5f37baa856d
| 2,845
|
py
|
Python
|
scene/scene_gen_procedural/scene_gen_procedural.py
|
unhyperbolic/Blender-3D-Python-Scripts
|
cea6467fa5629e7461b5eb3cd408aed3526ce811
|
[
"MIT"
] | null | null | null |
scene/scene_gen_procedural/scene_gen_procedural.py
|
unhyperbolic/Blender-3D-Python-Scripts
|
cea6467fa5629e7461b5eb3cd408aed3526ce811
|
[
"MIT"
] | null | null | null |
scene/scene_gen_procedural/scene_gen_procedural.py
|
unhyperbolic/Blender-3D-Python-Scripts
|
cea6467fa5629e7461b5eb3cd408aed3526ce811
|
[
"MIT"
] | 1
|
2021-03-21T01:22:13.000Z
|
2021-03-21T01:22:13.000Z
|
import bpy
from mathutils import Vector
##### Clean-up results from an earlier run of the script
# Delete previously created collection if present
collectionName = 'MyCollection'
if collectionName in bpy.data.collections:
bpy.data.collections.remove(bpy.data.collections[collectionName])
# Helper code. Is this part of the standard API?
def garbage_collect():
found_garbage = True
while found_garbage:
found_garbage = False
for things in [ bpy.data.collections,
bpy.data.meshes,
bpy.data.objects,
bpy.data.materials,
bpy.data.textures,
bpy.data.images ]:
for block in things:
if block.users == 0:
things.remove(block)
found_garbage = True
# Garbage collect so that objects which became orphaned when
# deleting the collection actually disappear.
#
# Note that the names of meshes, objects, ... must be unique in blender.
# Calling, e.g., bpy.data.materials.new('MyMaterial') will name
# the new material MyMaterial.001, MyMaterial.002 if MyMaterial
# already existed.
# Running this script several times would create materials with
# those names if we did not call garbage collect here.
garbage_collect()
##### Create a simple scene
# Create new material
myMaterial = bpy.data.materials.new('MyMaterial')
# Create mesh using that material
myMesh = bpy.data.meshes.new('MyMesh')
myMesh.materials.append(myMaterial)
# Add vertices and faces to mesh
#
# A potential bug:
# Note that you would expect that the three edges
# specified below should be the three line segments
# connecting the origin to each of the three vertices
# of the triangle - but there seems to be a bug and
# only two of those edges appear.
myMesh.from_pydata(
[ Vector([1,0,0]), # Vertices
Vector([0,1,0]),
Vector([0,0,1]),
Vector([0,0,0])],
[ (0,3), # Edges
(1,3),
(2,3) ],
[ (0,1,2) ]) # Faces
# Create object using that mesh
myObject = bpy.data.objects.new('MyObject', myMesh)
# A subtle note:
# Note that we can change the mesh later with
# myOtherMesh = bpy.data.meshes.new('MyOtherMesh')
# myObject.data = myOtherMesh
#
# However, we cannot create an empty object and then
# attach a mesh to it later, so the following code will fail:
# myObject = bpy.data.objects.new('MyObject', None)
# myObject.data = myMesh
# Is there a way to change the type of an object later or
# specify the type as mesh when calling bpy.data.objects.new without
# giving a mesh?
# Create collection using that object
myCollection = bpy.data.collections.new(collectionName)
myCollection.objects.link(myObject)
# Add collection to scene collection
bpy.context.scene.collection.children.link(myCollection)
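# Illustrative check (assumes this runs inside Blender right after the lines above):
# the new collection should now exist and contain the single object we linked.
assert collectionName in bpy.data.collections
assert len(bpy.data.collections[collectionName].objects) == 1
print('Created collection', collectionName, 'with object', myObject.name)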
| 31.611111
| 72
| 0.68225
|
ce5196ffe991aa1108bc833cc6fd02bf11f9f6e7
| 625
|
py
|
Python
|
src/leetcode/1986/sol_2.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | 1
|
2021-07-11T03:20:10.000Z
|
2021-07-11T03:20:10.000Z
|
src/leetcode/1986/sol_2.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | 39
|
2021-07-10T05:21:09.000Z
|
2021-12-15T06:10:12.000Z
|
src/leetcode/1986/sol_2.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | null | null | null |
import typing
import functools
class Solution:
def minSessions(
self,
a: typing.List[int],
t0: int,
) -> int:
inf = 1 << 10
n = len(a)
@functools.lru_cache(maxsize=None)
def dfs(
s: int,
) -> typing.Tuple[int, int]:
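      # dfs(s) returns a pair (number of fully closed sessions, time already used
      # in the still-open session) for the subset of tasks encoded by bitmask s.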
if s == 0: return (0, 0)
res = (inf, 0)
for i in range(n):
if ~s >> i & 1: continue
u = s & ~(1 << i)
c, t = dfs(u)
if t + a[i] > t0:
c += 1
t = a[i]
else:
t += a[i]
res = min(res, (c, t))
return res
c, t = dfs((1 << n) - 1)
return c + (t > 0)
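# Illustrative sketch: the task durations and session time below are made-up
# values, not the official test cases.
if __name__ == '__main__':
    print(Solution().minSessions([1, 2, 3], 3))  # expected 2, e.g. sessions [3] and [1, 2]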
| 17.361111
| 38
| 0.4112
|
e6c076ad81943e058978be5e2f0a2e7f95742aea
| 565
|
py
|
Python
|
src/utils.py
|
nashant/topology-spread-constraints-mutator
|
99a4f4047f3b113c91f2bf2e17fd6b4b9fe150eb
|
[
"MIT"
] | null | null | null |
src/utils.py
|
nashant/topology-spread-constraints-mutator
|
99a4f4047f3b113c91f2bf2e17fd6b4b9fe150eb
|
[
"MIT"
] | null | null | null |
src/utils.py
|
nashant/topology-spread-constraints-mutator
|
99a4f4047f3b113c91f2bf2e17fd6b4b9fe150eb
|
[
"MIT"
] | null | null | null |
from OpenSSL.crypto import load_certificate, FILETYPE_PEM, X509Store, X509StoreContext, X509StoreContextError
def verify_chain():
with open("/app/certs/ca.crt") as f:
root_cert = load_certificate(FILETYPE_PEM, f.read())
with open("/app/certs/tls.crt") as f:
tls_cert = load_certificate(FILETYPE_PEM, f.read())
store = X509Store()
store.add_cert(root_cert)
store_ctx = X509StoreContext(store, tls_cert)
try:
store_ctx.verify_certificate()
return True
except X509StoreContextError:
return False
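# Illustrative sketch: the function reads the fixed /app/certs paths above, so a
# caller only consumes the boolean result (assumes those files are mounted).
if __name__ == '__main__':
    print('tls.crt chains to ca.crt:', verify_chain())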
| 29.736842
| 109
| 0.700885
|
077e99053c0b687a8017412f7c5dc7bad540f1b3
| 3,049
|
py
|
Python
|
tests/parser/kernel_test.py
|
fifth-postulate/nand-optimizer
|
936d278411a073b8a7b860a47c9032e310736ba2
|
[
"MIT"
] | null | null | null |
tests/parser/kernel_test.py
|
fifth-postulate/nand-optimizer
|
936d278411a073b8a7b860a47c9032e310736ba2
|
[
"MIT"
] | null | null | null |
tests/parser/kernel_test.py
|
fifth-postulate/nand-optimizer
|
936d278411a073b8a7b860a47c9032e310736ba2
|
[
"MIT"
] | null | null | null |
from parser.kernel import Any, Avoid, Chain, Filter, Map, Predicate, Producing, Sequence, Success, Word, atleast, many, optionally
def assert_unique_parse(parses, expected_result, expected_rest):
assert len(parses) == 1
(result, rest) = parses[0]
assert result == expected_result
assert rest == expected_rest
def assert_longest_parse(parses, expected_result, expected_rest):
assert len(parses) > 0
(result, rest) = parses[0]
assert result == expected_result
    assert rest == expected_rest
def assert_failed(parses):
assert not parses
def test_success():
parser = Success()
parses = parser.parse('Test')
assert_unique_parse(parses, '', 'Test')
def test_predicate():
parser = Predicate(lambda character: character.isdigit())
parses = parser.parse('3435')
assert_unique_parse(parses, '3', '435')
def test_predicate_with_empty_string():
parser = Predicate(lambda character: character.isdigit())
parses = parser.parse('')
assert_failed(parses)
def test_word():
parser = Word('Hello')
parses = parser.parse('Hello, World!')
assert_unique_parse(parses, 'Hello', ', World!')
def test_any():
parser = Any([
Word('A'),
Word('B'),
])
parses = parser.parse('AB')
assert_unique_parse(parses, 'A', 'B')
def test_sequence():
parser = Sequence([
Word('A'),
Word('B'),
])
parses = parser.parse('AB')
assert_unique_parse(parses, ['A', 'B'], '')
def test_map():
parser = Map(lambda result: ''.join(result), Sequence([
Word('A'),
Word('B'),
]))
parses = parser.parse('ABC')
assert_unique_parse(parses, 'AB', 'C')
def test_filter():
parser = Filter(lambda out: out[0].isupper(), Word('A'))
parses = parser.parse('A')
assert_unique_parse(parses, 'A', '')
def test_avoid():
parser = Avoid('A!')
parses = parser.parse('BBBBBBBBBA!')
assert_unique_parse(parses, 'BBBBBBBBB', 'A!')
def test_avoid_when_failing():
parser = Avoid('A!')
parses = parser.parse('A!')
assert_unique_parse(parses, '', 'A!')
def test_chain():
parser = Chain(Word('ABC'), Word('A'))
parses = parser.parse('ABCD')
assert_unique_parse(parses, 'A', 'BCD')
def test_producing():
parser = Producing(many(Word('A')))
parses = parser.parse('')
assert_failed(parses)
def test_many():
parser = many(Word('A'))
parses = parser.parse('AAA')
assert len(parses) == 4
assert parses[0] == (['A', 'A', 'A'], '')
assert parses[1] == (['A', 'A'], 'A')
assert parses[2] == (['A'], 'AA')
assert parses[3] == ([], 'AAA')
def test_atleast():
parser = atleast(2, Word('A'))
parses = parser.parse('AAA')
assert len(parses) == 2
assert parses[0] == (['A', 'A', 'A'], '')
assert parses[1] == (['A', 'A'], 'A')
def test_optionally():
parser = optionally(Word('A'))
parses = parser.parse('A')
assert len(parses) == 2
assert parses[0] == ('A', '')
assert parses[1] == ('', 'A')
| 21.935252
| 130
| 0.601181
|
062437270ae0b0f813f2b82182b4cfb57c3714db
| 14,436
|
py
|
Python
|
utils.py
|
MarvinTheParanoid/EmailSum
|
0f5bfa25d0a9029edd6c39a85fbc91f96bf471ac
|
[
"MIT"
] | 28
|
2021-08-02T01:23:43.000Z
|
2022-02-22T06:17:41.000Z
|
utils.py
|
MarvinTheParanoid/EmailSum
|
0f5bfa25d0a9029edd6c39a85fbc91f96bf471ac
|
[
"MIT"
] | null | null | null |
utils.py
|
MarvinTheParanoid/EmailSum
|
0f5bfa25d0a9029edd6c39a85fbc91f96bf471ac
|
[
"MIT"
] | 2
|
2021-08-02T04:27:51.000Z
|
2021-12-01T22:49:12.000Z
|
import math
import numpy as np
import torch
from torch.utils.data import Sampler
from torch.optim.lr_scheduler import LambdaLR
from typing import Optional
class SortishSampler(Sampler):
"Go through the text data by order of src length with a bit of randomness. From fastai repo."
def __init__(self, data, batch_size):
self.data, self.bs = data, batch_size
def key(self, i):
return len(self.data[i])
def __len__(self) -> int:
return len(self.data)
def __iter__(self):
idxs = np.random.permutation(len(self.data))
sz = self.bs * 50
ck_idx = [idxs[i : i + sz] for i in range(0, len(idxs), sz)]
sort_idx = np.concatenate([sorted(s, key=self.key, reverse=True) for s in ck_idx])
sz = self.bs
ck_idx = [sort_idx[i : i + sz] for i in range(0, len(sort_idx), sz)]
max_ck = np.argmax([self.key(ck[0]) for ck in ck_idx]) # find the chunk with the largest key,
ck_idx[0], ck_idx[max_ck] = ck_idx[max_ck], ck_idx[0] # then make sure it goes first.
sort_idx = np.concatenate(np.random.permutation(ck_idx[1:])) if len(ck_idx) > 1 else np.array([], dtype=np.int)
sort_idx = np.concatenate((ck_idx[0], sort_idx))
return iter(sort_idx)
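def _demo_sortish_sampler():
    """Illustrative sketch: iterate a toy dataset of token-id lists with the
    sampler above. The sequence lengths and batch size are made-up values."""
    toy_data = [[0] * n for n in (5, 1, 9, 3, 7, 2, 8, 4)]
    sampler = SortishSampler(toy_data, batch_size=2)
    # Indices come out roughly sorted by length within each batch-sized chunk.
    return [len(toy_data[i]) for i in sampler]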
def get_inverse_sqrt_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
""" Create a schedule with a learning rate that decreases as 1/sqrt(current_step) after
being constant during a warmup period.
"""
def lr_lambda(current_step):
return max(
0.0, 1.0 / np.sqrt(float(max(current_step, float(max(1, num_warmup_steps)))))
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
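def _demo_inverse_sqrt_schedule():
    """Illustrative sketch: attach the schedule to a throwaway SGD optimizer and
    step it a few times. The warmup length and step count are made-up values."""
    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=1.0)
    scheduler = get_inverse_sqrt_schedule_with_warmup(
        optimizer, num_warmup_steps=4, num_training_steps=10
    )
    for _ in range(8):
        optimizer.step()
        scheduler.step()
    # Constant at 1/sqrt(4) during warmup, then decays as 1/sqrt(step): ~0.354 here.
    return optimizer.param_groups[0]["lr"]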
def get_range_vector(size: int, device: int) -> torch.Tensor:
"""
Returns a range vector with the desired size, starting at 0. The CUDA implementation
    is meant to avoid copying data from CPU to GPU.
"""
if device > -1:
return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1
else:
return torch.arange(0, size, dtype=torch.long)
def get_device_of(tensor: torch.Tensor) -> int:
"""
Returns the device of the tensor.
"""
if not tensor.is_cuda:
return -1
else:
return tensor.get_device()
def flatten_and_batch_shift_indices(indices: torch.Tensor, sequence_length: int) -> torch.Tensor:
"""
This is a subroutine for [`batched_index_select`](./util.md#batched_index_select).
The given `indices` of size `(batch_size, d_1, ..., d_n)` indexes into dimension 2 of a
target tensor, which has size `(batch_size, sequence_length, embedding_size)`. This
function returns a vector that correctly indexes into the flattened target. The sequence
length of the target must be provided to compute the appropriate offsets.
```python
indices = torch.ones([2,3], dtype=torch.long)
# Sequence length of the target tensor.
sequence_length = 10
shifted_indices = flatten_and_batch_shift_indices(indices, sequence_length)
# Indices into the second element in the batch are correctly shifted
# to take into account that the target tensor will be flattened before
# the indices are applied.
assert shifted_indices == [1, 1, 1, 11, 11, 11]
```
# Parameters
indices : `torch.LongTensor`, required.
sequence_length : `int`, required.
The length of the sequence the indices index into.
This must be the second dimension of the tensor.
# Returns
offset_indices : `torch.LongTensor`
"""
# Shape: (batch_size)
if torch.max(indices) >= sequence_length or torch.min(indices) < 0:
print(f"All elements in indices should be in range (0, {sequence_length - 1})")
offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1)
# Shape: (batch_size, d_1, ..., d_n)
offset_indices = indices + offsets
# Shape: (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices
def batched_index_select(
target: torch.Tensor,
indices: torch.LongTensor,
flattened_indices: Optional[torch.LongTensor] = None,
) -> torch.Tensor:
"""
The given `indices` of size `(batch_size, d_1, ..., d_n)` indexes into the sequence
dimension (dimension 2) of the target, which has size `(batch_size, sequence_length,
embedding_size)`.
This function returns selected values in the target with respect to the provided indices, which
have size `(batch_size, d_1, ..., d_n, embedding_size)`. This can use the optionally
precomputed `flattened_indices` with size `(batch_size * d_1 * ... * d_n)` if given.
An example use case of this function is looking up the start and end indices of spans in a
sequence tensor. This is used in the
[CoreferenceResolver](../models/coreference_resolution/coref.md). Model to select
contextual word representations corresponding to the start and end indices of mentions. The key
reason this can't be done with basic torch functions is that we want to be able to use look-up
tensors with an arbitrary number of dimensions (for example, in the coref model, we don't know
a-priori how many spans we are looking up).
# Parameters
target : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
This is the tensor to be indexed.
indices : `torch.LongTensor`
A tensor of shape (batch_size, ...), where each element is an index into the
`sequence_length` dimension of the `target` tensor.
flattened_indices : `Optional[torch.Tensor]`, optional (default = `None`)
An optional tensor representing the result of calling `flatten_and_batch_shift_indices`
on `indices`. This is helpful in the case that the indices can be flattened once and
cached for many batch lookups.
# Returns
selected_targets : `torch.Tensor`
A tensor with shape [indices.size(), target.size(-1)] representing the embedded indices
extracted from the batch flattened target tensor.
"""
if flattened_indices is None:
# Shape: (batch_size * d_1 * ... * d_n)
flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
# Shape: (batch_size * sequence_length, embedding_size)
flattened_target = target.view(-1, target.size(-1))
# Shape: (batch_size * d_1 * ... * d_n, embedding_size)
flattened_selected = flattened_target.index_select(0, flattened_indices)
selected_shape = list(indices.size()) + [target.size(-1)]
# Shape: (batch_size, d_1, ..., d_n, embedding_size)
selected_targets = flattened_selected.view(*selected_shape)
return selected_targets
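def _demo_batched_index_select():
    """Illustrative sketch: gather two positions per batch element from a toy
    (batch, sequence, embedding) tensor. All shapes are made-up values."""
    target = torch.arange(2 * 4 * 3, dtype=torch.float).view(2, 4, 3)
    indices = torch.tensor([[0, 3], [1, 2]])
    selected = batched_index_select(target, indices)
    assert selected.shape == (2, 2, 3)
    return selected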
class Adafactor(torch.optim.Optimizer):
"""Implements Adafactor algorithm.
This implementation is based on:
`Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
(see https://arxiv.org/abs/1804.04235)
Note that this optimizer internally adjusts the learning rate
depending on the *scale_parameter*, *relative_step* and
*warmup_init* options. To use a manual (external) learning rate
schedule you should set `scale_parameter=False` and
`relative_step=False`.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): external learning rate (default: None)
        eps (tuple[float, float]): regularization constants for square gradient
and parameter scale respectively (default: (1e-30, 1e-3))
clip_threshold (float): threshold of root mean square of
final gradient update (default: 1.0)
decay_rate (float): coefficient used to compute running averages of square
gradient (default: -0.8)
beta1 (float): coefficient used for computing running averages of gradient
(default: None)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
scale_parameter (bool): if True, learning rate is scaled by root mean square of
parameter (default: True)
relative_step (bool): if True, time-dependent learning rate is computed
instead of external learning rate (default: True)
warmup_init (bool): time-dependent learning rate computation depends on
whether warm-up initialization is being used (default: False)
"""
def __init__(self, params, lr=None, eps=(1e-30, 1e-3), clip_threshold=1.0,
decay_rate=-0.8, beta1=None, weight_decay=0.0, scale_parameter=True,
relative_step=True, warmup_init=False):
if lr is not None and relative_step:
raise ValueError('Cannot combine manual lr and relative_step options')
if warmup_init and not relative_step:
raise ValueError('warmup_init requires relative_step=True')
defaults = dict(lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate,
beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter,
relative_step=relative_step, warmup_init=warmup_init)
super(Adafactor, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return False
def _get_lr(self, param_group, param_state):
rel_step_sz = param_group['lr']
if param_group['relative_step']:
min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2
rel_step_sz = min(min_step, 1.0/math.sqrt(param_state['step']))
param_scale = 1.0
if param_group['scale_parameter']:
param_scale = max(param_group['eps'][1], param_state['RMS'])
return param_scale * rel_step_sz
def _get_options(self, param_group, param_shape):
factored = len(param_shape) >= 2
use_first_moment = param_group['beta1'] is not None
return factored, use_first_moment
def _rms(self, tensor):
return tensor.norm(2) / (tensor.numel() ** 0.5)
def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
r_factor = (
exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)
).rsqrt_()
c_factor = exp_avg_sq_col.rsqrt()
return torch.mm(r_factor.unsqueeze(-1), c_factor.unsqueeze(0))
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError('Adafactor does not support sparse gradients.')
state = self.state[p]
grad_shape = grad.shape
factored, use_first_moment = self._get_options(group, grad_shape)
# State Initialization
if len(state) == 0:
state['step'] = 0
if use_first_moment:
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(grad)
if factored:
state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).to(grad)
state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
else:
state['exp_avg_sq'] = torch.zeros_like(grad)
state['RMS'] = 0
else:
if use_first_moment:
state['exp_avg'] = state['exp_avg'].to(grad)
if factored:
state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)
state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)
else:
state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state['step'] += 1
state['RMS'] = self._rms(p_data_fp32)
group['lr'] = self._get_lr(group, state)
beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])
update = (grad**2) + group['eps'][0]
if factored:
exp_avg_sq_row = state['exp_avg_sq_row']
exp_avg_sq_col = state['exp_avg_sq_col']
exp_avg_sq_row.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-1))
exp_avg_sq_col.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-2))
# Approximation of exponential moving average of square of gradient
update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
update.mul_(grad)
else:
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(beta2t).add_(1.0 - beta2t, update)
update = exp_avg_sq.rsqrt().mul_(grad)
update.div_(
(self._rms(update) / group['clip_threshold']).clamp_(min=1.0)
)
update.mul_(group['lr'])
if use_first_moment:
exp_avg = state['exp_avg']
exp_avg.mul_(group['beta1']).add_(1 - group['beta1'], update)
update = exp_avg
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
p_data_fp32.add_(-update)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
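def _demo_adafactor():
    """Illustrative sketch: one update on a throwaway linear model using the
    internal relative-step learning rate. The model size is a made-up value."""
    model = torch.nn.Linear(4, 2)
    optimizer = Adafactor(model.parameters(), lr=None, relative_step=True, warmup_init=True)
    loss = model(torch.randn(3, 4)).pow(2).mean()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return loss.item()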
| 43.481928
| 119
| 0.62829
|
69ef1403156fa89463dd910fbcb088cf211b4dea
| 14,903
|
py
|
Python
|
corporate/views.py
|
TylerPham2000/zulip
|
2e7aaba0dde5517b4a55cb0bd782f009be45e3ba
|
[
"Apache-2.0"
] | null | null | null |
corporate/views.py
|
TylerPham2000/zulip
|
2e7aaba0dde5517b4a55cb0bd782f009be45e3ba
|
[
"Apache-2.0"
] | null | null | null |
corporate/views.py
|
TylerPham2000/zulip
|
2e7aaba0dde5517b4a55cb0bd782f009be45e3ba
|
[
"Apache-2.0"
] | null | null | null |
import logging
from decimal import Decimal
from typing import Any, Dict, Optional, Union
from urllib.parse import urlencode, urljoin, urlunsplit
import stripe
from django.conf import settings
from django.core import signing
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext as _
from corporate.lib.stripe import (
DEFAULT_INVOICE_DAYS_UNTIL_DUE,
MAX_INVOICED_LICENSES,
MIN_INVOICED_LICENSES,
STRIPE_PUBLISHABLE_KEY,
BillingError,
do_change_plan_status,
do_replace_payment_source,
downgrade_at_the_end_of_billing_cycle,
downgrade_now_without_creating_additional_invoices,
get_latest_seat_count,
make_end_of_cycle_updates_if_needed,
process_initial_upgrade,
renewal_amount,
sign_string,
start_of_next_billing_cycle,
stripe_get_customer,
unsign_string,
update_sponsorship_status,
)
from corporate.models import (
CustomerPlan,
get_current_plan_by_customer,
get_current_plan_by_realm,
get_customer_by_realm,
)
from zerver.decorator import (
require_billing_access,
require_organization_member,
zulip_login_required,
)
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.send_email import FromAddress, send_email
from zerver.lib.validator import check_int, check_string
from zerver.models import UserProfile, get_realm
billing_logger = logging.getLogger("corporate.stripe")
def unsign_seat_count(signed_seat_count: str, salt: str) -> int:
try:
return int(unsign_string(signed_seat_count, salt))
except signing.BadSignature:
raise BillingError("tampered seat count")
def check_upgrade_parameters(
billing_modality: str,
schedule: str,
license_management: Optional[str],
licenses: Optional[int],
has_stripe_token: bool,
seat_count: int,
) -> None:
if billing_modality not in ["send_invoice", "charge_automatically"]:
raise BillingError("unknown billing_modality")
if schedule not in ["annual", "monthly"]:
raise BillingError("unknown schedule")
if license_management not in ["automatic", "manual"]:
raise BillingError("unknown license_management")
if billing_modality == "charge_automatically":
if not has_stripe_token:
raise BillingError("autopay with no card")
min_licenses = seat_count
max_licenses = None
if billing_modality == "send_invoice":
min_licenses = max(seat_count, MIN_INVOICED_LICENSES)
max_licenses = MAX_INVOICED_LICENSES
if licenses is None or licenses < min_licenses:
raise BillingError(
"not enough licenses", _("You must invoice for at least {} users.").format(min_licenses)
)
if max_licenses is not None and licenses > max_licenses:
message = _(
"Invoices with more than {} licenses can't be processed from this page. To complete "
"the upgrade, please contact {}."
).format(max_licenses, settings.ZULIP_ADMINISTRATOR)
raise BillingError("too many licenses", message)
# Should only be called if the customer is being charged automatically
def payment_method_string(stripe_customer: stripe.Customer) -> str:
stripe_source: Optional[Union[stripe.Card, stripe.Source]] = stripe_customer.default_source
# In case of e.g. an expired card
if stripe_source is None: # nocoverage
return _("No payment method on file")
if stripe_source.object == "card":
assert isinstance(stripe_source, stripe.Card)
return _("{brand} ending in {last4}").format(
brand=stripe_source.brand,
last4=stripe_source.last4,
)
# There might be one-off stuff we do for a particular customer that
# would land them here. E.g. by default we don't support ACH for
# automatic payments, but in theory we could add it for a customer via
# the Stripe dashboard.
return _("Unknown payment method. Please contact {email}.").format(
email=settings.ZULIP_ADMINISTRATOR,
) # nocoverage
@require_organization_member
@has_request_variables
def upgrade(
request: HttpRequest,
user: UserProfile,
billing_modality: str = REQ(validator=check_string),
schedule: str = REQ(validator=check_string),
license_management: Optional[str] = REQ(validator=check_string, default=None),
licenses: Optional[int] = REQ(validator=check_int, default=None),
stripe_token: Optional[str] = REQ(validator=check_string, default=None),
signed_seat_count: str = REQ(validator=check_string),
salt: str = REQ(validator=check_string),
) -> HttpResponse:
try:
seat_count = unsign_seat_count(signed_seat_count, salt)
if billing_modality == "charge_automatically" and license_management == "automatic":
licenses = seat_count
if billing_modality == "send_invoice":
schedule = "annual"
license_management = "manual"
check_upgrade_parameters(
billing_modality,
schedule,
license_management,
licenses,
stripe_token is not None,
seat_count,
)
assert licenses is not None
automanage_licenses = license_management == "automatic"
billing_schedule = {"annual": CustomerPlan.ANNUAL, "monthly": CustomerPlan.MONTHLY}[
schedule
]
process_initial_upgrade(user, licenses, automanage_licenses, billing_schedule, stripe_token)
except BillingError as e:
if not settings.TEST_SUITE: # nocoverage
billing_logger.warning(
"BillingError during upgrade: %s. user=%s, realm=%s (%s), billing_modality=%s, "
"schedule=%s, license_management=%s, licenses=%s, has stripe_token: %s",
e.description,
user.id,
user.realm.id,
user.realm.string_id,
billing_modality,
schedule,
license_management,
licenses,
stripe_token is not None,
)
return json_error(e.message, data={"error_description": e.description})
except Exception:
billing_logger.exception("Uncaught exception in billing:", stack_info=True)
error_message = BillingError.CONTACT_SUPPORT.format(email=settings.ZULIP_ADMINISTRATOR)
error_description = "uncaught exception during upgrade"
return json_error(error_message, data={"error_description": error_description})
else:
return json_success()
@zulip_login_required
def initial_upgrade(request: HttpRequest) -> HttpResponse:
user = request.user
if not settings.BILLING_ENABLED or user.is_guest:
return render(request, "404.html", status=404)
billing_page_url = reverse(billing_home)
customer = get_customer_by_realm(user.realm)
if customer is not None and (
get_current_plan_by_customer(customer) is not None or customer.sponsorship_pending
):
if request.GET.get("onboarding") is not None:
billing_page_url = f"{billing_page_url}?onboarding=true"
return HttpResponseRedirect(billing_page_url)
if user.realm.plan_type == user.realm.STANDARD_FREE:
return HttpResponseRedirect(billing_page_url)
percent_off = Decimal(0)
if customer is not None and customer.default_discount is not None:
percent_off = customer.default_discount
seat_count = get_latest_seat_count(user.realm)
signed_seat_count, salt = sign_string(str(seat_count))
context: Dict[str, Any] = {
"realm": user.realm,
"publishable_key": STRIPE_PUBLISHABLE_KEY,
"email": user.delivery_email,
"seat_count": seat_count,
"signed_seat_count": signed_seat_count,
"salt": salt,
"min_invoiced_licenses": max(seat_count, MIN_INVOICED_LICENSES),
"default_invoice_days_until_due": DEFAULT_INVOICE_DAYS_UNTIL_DUE,
"plan": "Zulip Standard",
"free_trial_days": settings.FREE_TRIAL_DAYS,
"onboarding": request.GET.get("onboarding") is not None,
"page_params": {
"seat_count": seat_count,
"annual_price": 8000,
"monthly_price": 800,
"percent_off": float(percent_off),
},
}
response = render(request, "corporate/upgrade.html", context=context)
return response
@require_organization_member
@has_request_variables
def sponsorship(
request: HttpRequest,
user: UserProfile,
organization_type: str = REQ("organization-type", validator=check_string),
website: str = REQ("website", validator=check_string),
description: str = REQ("description", validator=check_string),
) -> HttpResponse:
realm = user.realm
requested_by = user.full_name
user_role = user.get_role_name()
support_realm_uri = get_realm(settings.STAFF_SUBDOMAIN).uri
support_url = urljoin(
support_realm_uri,
urlunsplit(("", "", reverse("support"), urlencode({"q": realm.string_id}), "")),
)
context = {
"requested_by": requested_by,
"user_role": user_role,
"string_id": realm.string_id,
"support_url": support_url,
"organization_type": organization_type,
"website": website,
"description": description,
}
send_email(
"zerver/emails/sponsorship_request",
to_emails=[FromAddress.SUPPORT],
from_name="Zulip sponsorship",
from_address=FromAddress.tokenized_no_reply_address(),
reply_to_email=user.delivery_email,
context=context,
)
update_sponsorship_status(realm, True, acting_user=user)
user.is_billing_admin = True
user.save(update_fields=["is_billing_admin"])
return json_success()
@zulip_login_required
def billing_home(request: HttpRequest) -> HttpResponse:
user = request.user
customer = get_customer_by_realm(user.realm)
context: Dict[str, Any] = {
"admin_access": user.has_billing_access,
"has_active_plan": False,
}
if user.realm.plan_type == user.realm.STANDARD_FREE:
context["is_sponsored"] = True
return render(request, "corporate/billing.html", context=context)
if customer is None:
return HttpResponseRedirect(reverse(initial_upgrade))
if customer.sponsorship_pending:
context["sponsorship_pending"] = True
return render(request, "corporate/billing.html", context=context)
if not CustomerPlan.objects.filter(customer=customer).exists():
return HttpResponseRedirect(reverse(initial_upgrade))
if not user.has_billing_access:
return render(request, "corporate/billing.html", context=context)
plan = get_current_plan_by_customer(customer)
if plan is not None:
now = timezone_now()
new_plan, last_ledger_entry = make_end_of_cycle_updates_if_needed(plan, now)
if last_ledger_entry is not None:
if new_plan is not None: # nocoverage
plan = new_plan
assert plan is not None # for mypy
free_trial = plan.status == CustomerPlan.FREE_TRIAL
downgrade_at_end_of_cycle = plan.status == CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE
switch_to_annual_at_end_of_cycle = (
plan.status == CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE
)
licenses = last_ledger_entry.licenses
licenses_used = get_latest_seat_count(user.realm)
# Should do this in javascript, using the user's timezone
renewal_date = "{dt:%B} {dt.day}, {dt.year}".format(
dt=start_of_next_billing_cycle(plan, now)
)
renewal_cents = renewal_amount(plan, now)
charge_automatically = plan.charge_automatically
stripe_customer = stripe_get_customer(customer.stripe_customer_id)
if charge_automatically:
payment_method = payment_method_string(stripe_customer)
else:
payment_method = "Billed by invoice"
context.update(
plan_name=plan.name,
has_active_plan=True,
free_trial=free_trial,
downgrade_at_end_of_cycle=downgrade_at_end_of_cycle,
automanage_licenses=plan.automanage_licenses,
switch_to_annual_at_end_of_cycle=switch_to_annual_at_end_of_cycle,
licenses=licenses,
licenses_used=licenses_used,
renewal_date=renewal_date,
renewal_amount=f"{renewal_cents / 100.:,.2f}",
payment_method=payment_method,
charge_automatically=charge_automatically,
publishable_key=STRIPE_PUBLISHABLE_KEY,
stripe_email=stripe_customer.email,
CustomerPlan=CustomerPlan,
onboarding=request.GET.get("onboarding") is not None,
)
return render(request, "corporate/billing.html", context=context)
@require_billing_access
@has_request_variables
def change_plan_status(
request: HttpRequest, user: UserProfile, status: int = REQ("status", validator=check_int)
) -> HttpResponse:
assert status in [
CustomerPlan.ACTIVE,
CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE,
CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE,
CustomerPlan.ENDED,
]
plan = get_current_plan_by_realm(user.realm)
assert plan is not None # for mypy
if status == CustomerPlan.ACTIVE:
assert plan.status == CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE
do_change_plan_status(plan, status)
elif status == CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE:
assert plan.status == CustomerPlan.ACTIVE
downgrade_at_the_end_of_billing_cycle(user.realm)
elif status == CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE:
assert plan.billing_schedule == CustomerPlan.MONTHLY
assert plan.status == CustomerPlan.ACTIVE
assert plan.fixed_price is None
do_change_plan_status(plan, status)
elif status == CustomerPlan.ENDED:
assert plan.status == CustomerPlan.FREE_TRIAL
downgrade_now_without_creating_additional_invoices(user.realm)
return json_success()
@require_billing_access
@has_request_variables
def replace_payment_source(
request: HttpRequest,
user: UserProfile,
stripe_token: str = REQ("stripe_token", validator=check_string),
) -> HttpResponse:
try:
do_replace_payment_source(user, stripe_token, pay_invoices=True)
except BillingError as e:
return json_error(e.message, data={"error_description": e.description})
return json_success()
| 37.824873
| 100
| 0.689995
|
427ce8d9421098a24df79b3e1633cdf5a433169f
| 11,918
|
py
|
Python
|
electrum_dash/gui/kivy/uix/dialogs/password_dialog.py
|
abuhabban/electrum-dash
|
36a0c9596919a6d9b64e99bcd72a18674353ce43
|
[
"MIT"
] | 1
|
2021-03-19T05:25:15.000Z
|
2021-03-19T05:25:15.000Z
|
electrum_dash/gui/kivy/uix/dialogs/password_dialog.py
|
abuhabban/electrum-dash
|
36a0c9596919a6d9b64e99bcd72a18674353ce43
|
[
"MIT"
] | null | null | null |
electrum_dash/gui/kivy/uix/dialogs/password_dialog.py
|
abuhabban/electrum-dash
|
36a0c9596919a6d9b64e99bcd72a18674353ce43
|
[
"MIT"
] | null | null | null |
from typing import Callable, TYPE_CHECKING, Optional, Union
import os
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from decimal import Decimal
from kivy.clock import Clock
from electrum_dash.util import InvalidPassword
from electrum_dash.wallet import WalletStorage, Wallet
from electrum_dash.gui.kivy.i18n import _
from electrum_dash.wallet_db import WalletDB
from .wallets import WalletDialog
if TYPE_CHECKING:
from ...main_window import ElectrumWindow
from electrum_dash.wallet import Abstract_Wallet
from electrum_dash.storage import WalletStorage
Builder.load_string('''
#:import KIVY_GUI_PATH electrum_dash.gui.kivy.KIVY_GUI_PATH
<PasswordDialog@Popup>
id: popup
title: 'Dash Electrum'
message: ''
basename:''
is_change: False
require_password: True
BoxLayout:
size_hint: 1, 1
orientation: 'vertical'
spacing: '12dp'
padding: '12dp'
BoxLayout:
size_hint: 1, None
orientation: 'horizontal'
height: '40dp'
Label:
size_hint: 0.85, None
height: '40dp'
font_size: '20dp'
text: _('Wallet') + ': ' + root.basename
text_size: self.width, None
IconButton:
size_hint: 0.15, None
height: '40dp'
icon: f'atlas://{KIVY_GUI_PATH}/theming/light/btn_create_account'
on_release: root.select_file()
disabled: root.is_change
opacity: 0 if root.is_change else 1
Widget:
size_hint: 1, 0.05
Label:
size_hint: 0.70, None
font_size: '20dp'
text: root.message
text_size: self.width, None
Widget:
size_hint: 1, 0.05
BoxLayout:
orientation: 'horizontal'
id: box_generic_password
disabled: not root.require_password
opacity: int(root.require_password)
size_hint_y: 0.05
height: '40dp'
TextInput:
height: '40dp'
id: textinput_generic_password
valign: 'center'
multiline: False
on_text_validate:
popup.on_password(self.text)
password: True
size_hint: 0.85, None
unfocus_on_touch: False
focus: True
IconButton:
height: '40dp'
size_hint: 0.15, None
icon: f'atlas://{KIVY_GUI_PATH}/theming/light/eye1'
icon_size: '40dp'
on_release:
textinput_generic_password.password = False if textinput_generic_password.password else True
Widget:
size_hint: 1, 1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.5
Button:
text: 'Cancel'
size_hint: 0.5, None
height: '48dp'
on_release: popup.dismiss()
Button:
text: 'Next'
size_hint: 0.5, None
height: '48dp'
on_release:
popup.on_password(textinput_generic_password.text)
<PincodeDialog@Popup>
id: popup
title: 'Dash Electrum'
message: ''
basename:''
BoxLayout:
size_hint: 1, 1
orientation: 'vertical'
Widget:
size_hint: 1, 0.05
Label:
size_hint: 0.70, None
font_size: '20dp'
text: root.message
text_size: self.width, None
Widget:
size_hint: 1, 0.05
Label:
id: label_pin
size_hint_y: 0.05
font_size: '50dp'
text: '*'*len(kb.password) + '-'*(6-len(kb.password))
size: self.texture_size
Widget:
size_hint: 1, 0.05
GridLayout:
id: kb
size_hint: 1, None
height: self.minimum_height
update_amount: popup.update_password
password: ''
on_password: popup.on_password(self.password)
spacing: '2dp'
cols: 3
KButton:
text: '1'
KButton:
text: '2'
KButton:
text: '3'
KButton:
text: '4'
KButton:
text: '5'
KButton:
text: '6'
KButton:
text: '7'
KButton:
text: '8'
KButton:
text: '9'
KButton:
text: 'Clear'
KButton:
text: '0'
KButton:
text: '<'
''')
class AbstractPasswordDialog(Factory.Popup):
def __init__(self, app: 'ElectrumWindow', *,
check_password = None,
on_success: Callable = None, on_failure: Callable = None,
is_change: bool = False,
is_password: bool = True, # whether this is for a generic password or for a numeric PIN
has_password: bool = False,
message: str = '',
basename:str=''):
Factory.Popup.__init__(self)
self.app = app
self.pw_check = check_password
self.message = message
self.on_success = on_success
self.on_failure = on_failure
self.success = False
self.is_change = is_change
self.pw = None
self.new_password = None
self.title = 'Dash Electrum'
self.level = 1 if is_change and not has_password else 0
self.basename = basename
self.update_screen()
def update_screen(self):
self.clear_password()
if self.level == 0 and self.message == '':
self.message = self.enter_pw_message
elif self.level == 1:
self.message = self.enter_new_pw_message
elif self.level == 2:
self.message = self.confirm_new_pw_message
def check_password(self, password):
if self.level > 0:
return True
try:
self.pw_check(password)
return True
except InvalidPassword as e:
return False
def on_dismiss(self):
if self.level == 1 and self.allow_disable and self.on_success:
self.on_success(self.pw, None)
return False
if not self.success:
if self.on_failure:
self.on_failure()
else:
# keep dialog open
return True
else:
if self.on_success:
self.ids.textinput_generic_password.focus = False
args = (self.pw, self.new_password) if self.is_change else (self.pw,)
Clock.schedule_once(lambda dt: self.on_success(*args), 0.1)
def update_password(self, c):
kb = self.ids.kb
text = kb.password
if c == '<':
text = text[:-1]
elif c == 'Clear':
text = ''
else:
text += c
kb.password = text
def do_check(self, pw):
if self.check_password(pw):
if self.is_change is False:
self.success = True
self.pw = pw
self.message = _('Please wait...')
self.dismiss()
elif self.level == 0:
self.level = 1
self.pw = pw
self.update_screen()
elif self.level == 1:
self.level = 2
self.new_password = pw
self.update_screen()
elif self.level == 2:
self.success = pw == self.new_password
self.dismiss()
else:
self.app.show_error(self.wrong_password_message)
self.clear_password()
class PasswordDialog(AbstractPasswordDialog):
enter_pw_message = _('Enter your password')
enter_new_pw_message = _('Enter new password')
confirm_new_pw_message = _('Confirm new password')
wrong_password_message = _('Wrong password')
allow_disable = False
def __init__(self, app, **kwargs):
AbstractPasswordDialog.__init__(self, app, **kwargs)
def clear_password(self):
self.ids.textinput_generic_password.text = ''
def on_password(self, pw: str):
#
if not self.require_password:
self.success = True
self.message = _('Please wait...')
self.dismiss()
return
# if setting new generic password, enforce min length
if self.level > 0:
if len(pw) < 6:
self.app.show_error(_('Password is too short (min {} characters)').format(6))
return
        # don't enforce the minimum length on an existing password
self.do_check(pw)
class PincodeDialog(AbstractPasswordDialog):
enter_pw_message = _('Enter your PIN')
enter_new_pw_message = _('Enter new PIN')
confirm_new_pw_message = _('Confirm new PIN')
wrong_password_message = _('Wrong PIN')
allow_disable = True
def __init__(self, app, **kwargs):
AbstractPasswordDialog.__init__(self, app, **kwargs)
def clear_password(self):
self.ids.kb.password = ''
def on_password(self, pw: str):
# PIN codes are exactly 6 chars
if len(pw) >= 6:
self.do_check(pw)
class ChangePasswordDialog(PasswordDialog):
def __init__(self, app, wallet, on_success, on_failure):
PasswordDialog.__init__(self, app,
basename = wallet.basename(),
check_password = wallet.check_password,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password=wallet.has_password())
class OpenWalletDialog(PasswordDialog):
def __init__(self, app, path, callback):
self.app = app
self.callback = callback
PasswordDialog.__init__(self, app,
on_success=lambda pw: self.callback(pw, self.storage),
on_failure=self.app.stop)
self.init_storage_from_path(path)
def select_file(self):
dirname = os.path.dirname(self.app.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.init_storage_from_path)
d.open()
def init_storage_from_path(self, path):
self.storage = WalletStorage(path)
self.basename = self.storage.basename()
if not self.storage.file_exists():
self.require_password = False
self.message = _('Press Next to create')
elif self.storage.is_encrypted():
if not self.storage.is_encrypted_with_user_pw():
raise Exception("Kivy GUI does not support this type of encrypted wallet files.")
self.require_password = True
self.pw_check = self.storage.check_password
self.message = self.enter_pw_message
else:
            # it is a bit wasteful to load the wallet here and load it again in main_window,
# but that is fine, because we are progressively enforcing storage encryption.
db = WalletDB(self.storage.read(), manual_upgrades=False)
if db.upgrade_done:
self.storage.backup_old_version()
self.app.show_backup_msg()
if db.check_unfinished_multisig():
self.require_password = False
else:
wallet = Wallet(db, self.storage, config=self.app.electrum_config)
self.require_password = wallet.has_password()
self.pw_check = wallet.check_password
self.message = (self.enter_pw_message if self.require_password
else _('Wallet not encrypted'))
| 32.652055
| 112
| 0.554288
|
af4e7c76b3ddb758a4d3d369b09db57163099e91
| 141
|
py
|
Python
|
src/main/resources/docs/tests/E1121.py
|
h314to/codacy-pylint
|
9d31567db6188e1b31ce0e1567998f64946502df
|
[
"Apache-2.0"
] | null | null | null |
src/main/resources/docs/tests/E1121.py
|
h314to/codacy-pylint
|
9d31567db6188e1b31ce0e1567998f64946502df
|
[
"Apache-2.0"
] | null | null | null |
src/main/resources/docs/tests/E1121.py
|
h314to/codacy-pylint
|
9d31567db6188e1b31ce0e1567998f64946502df
|
[
"Apache-2.0"
] | null | null | null |
##Patterns: E1121
def my_method(arg1, arg2):
    print(arg1 + arg2)
def main():
##Err: E1121
my_method(1, 3, 4)
my_method(1, 3)
| 14.1
| 26
| 0.595745
|
eadb4ebe9bc113b67e9e8df8e3449dd345ab118d
| 351
|
py
|
Python
|
mysite/chat/views.py
|
MJohnson159/CINS465-F18-Examples
|
c5fb0d6bdd0937e21e31ed1dbcfe3b6dd44e43c8
|
[
"MIT"
] | null | null | null |
mysite/chat/views.py
|
MJohnson159/CINS465-F18-Examples
|
c5fb0d6bdd0937e21e31ed1dbcfe3b6dd44e43c8
|
[
"MIT"
] | null | null | null |
mysite/chat/views.py
|
MJohnson159/CINS465-F18-Examples
|
c5fb0d6bdd0937e21e31ed1dbcfe3b6dd44e43c8
|
[
"MIT"
] | null | null | null |
# chat/views.py
from django.shortcuts import render
from django.utils.safestring import mark_safe
import json
# Create your views here.
def index(request):
return render(request, 'chat/index.html', {})
def room(request, room_name):
return render(request ,'chat/room.html', {
'room_name_json': mark_safe(json.dumps(room_name))
})
| 25.071429
| 58
| 0.720798
|
b6a26cda7c2b71e476c98c85b211cd375e5b0f1b
| 1,047
|
py
|
Python
|
{{cookiecutter.project_slug}}/djangocms-api/djangocms_api/urls.py
|
narfman0/zappa-examples
|
aeb08dd30743a5eda79bebcc66e9285d3171256e
|
[
"MIT"
] | 44
|
2016-12-16T04:48:23.000Z
|
2021-08-17T20:08:52.000Z
|
{{cookiecutter.project_slug}}/djangocms-api/djangocms_api/urls.py
|
narfman0/zappa-examples
|
aeb08dd30743a5eda79bebcc66e9285d3171256e
|
[
"MIT"
] | 1
|
2017-03-07T23:17:02.000Z
|
2017-03-08T08:07:24.000Z
|
{{cookiecutter.project_slug}}/djangocms-api/djangocms_api/urls.py
|
narfman0/zappa-examples
|
aeb08dd30743a5eda79bebcc66e9285d3171256e
|
[
"MIT"
] | 10
|
2017-07-17T17:02:09.000Z
|
2020-12-13T20:02:36.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.sitemaps import CMSSitemap
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
admin.autodiscover()
urlpatterns = [
url(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap',
{'sitemaps': {'cmspages': CMSSitemap}}),
url(r'^select2/', include('django_select2.urls')),
url(r'^api/', include('djangocms_rest_api.urls', namespace='api')),
]
urlpatterns += i18n_patterns('',
url(r'^admin/', include(admin.site.urls)), # NOQA
url(r'^', include('cms.urls')),
)
# This is only needed when using runserver.
if settings.DEBUG:
urlpatterns = [
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
] + staticfiles_urlpatterns() + urlpatterns
| 33.774194
| 74
| 0.702006
|
ad171d04bddce02ac07bc4d0d2a074145f6350a1
| 3,019
|
py
|
Python
|
lct/secrets/secrets_provider.py
|
pathbreak/linode-cluster-toolkit
|
280257436105703c9a122e7ed111a72efa79adfc
|
[
"MIT"
] | 11
|
2017-07-19T15:25:39.000Z
|
2021-12-02T20:03:21.000Z
|
lct/secrets/secrets_provider.py
|
pathbreak/linode-cluster-toolkit
|
280257436105703c9a122e7ed111a72efa79adfc
|
[
"MIT"
] | null | null | null |
lct/secrets/secrets_provider.py
|
pathbreak/linode-cluster-toolkit
|
280257436105703c9a122e7ed111a72efa79adfc
|
[
"MIT"
] | 1
|
2021-12-02T20:03:22.000Z
|
2021-12-02T20:03:22.000Z
|
class SecretsProvider(object):
'''
Interface to be implemented by a secrets provider.
'''
def initialize(self, tk):
raise NotImplementedError('subclasses should override this')
def close(self):
raise NotImplementedError('subclasses should override this')
def get_v3_api_key(self, tkctx):
raise NotImplementedError('subclasses should override this')
def get_v4_personal_token(self, tkctx):
raise NotImplementedError('subclasses should override this')
def get_v4_oauth_token(self, tkctx):
raise NotImplementedError('subclasses should override this')
def get_v4_oauth_client_id(self, tkctx):
raise NotImplementedError('subclasses should override this')
def get_v4_oauth_client_secret(self, tkctx):
raise NotImplementedError('subclasses should override this')
def get_default_root_password(self, tkctx):
raise NotImplementedError('subclasses should override this')
def get_default_root_ssh_public_key(self, tkctx):
raise NotImplementedError('subclasses should override this')
def get_node_password(self, tkctx, node, user):
raise NotImplementedError('subclasses should override this')
def get_node_ssh_key(self, tkctx, node, user):
raise NotImplementedError('subclasses should override this')
def store_v3_api_key(self, tkctx, v3_api_key):
raise NotImplementedError('subclasses should override this')
def store_v4_personal_token(self, tkctx, v4_personal_token):
raise NotImplementedError('subclasses should override this')
def store_v4_oauth_token(self, tkctx, v4_oauth_token):
raise NotImplementedError('subclasses should override this')
def store_v4_oauth_client_id(self, tkctx, v4_oauth_client_id):
raise NotImplementedError('subclasses should override this')
def store_v4_oauth_client_secret(self, tkctx, v4_oauth_client_secret):
raise NotImplementedError('subclasses should override this')
def store_default_root_password(self, tkctx, default_root_password):
raise NotImplementedError('subclasses should override this')
def store_default_root_ssh_public_key(self, tkctx, default_root_ssh_public_key):
raise NotImplementedError('subclasses should override this')
def store_node_password(self, tkctx, node, user, password):
raise NotImplementedError('subclasses should override this')
def store_node_ssh_key(self, tkctx, node, user, ssh_key):
raise NotImplementedError('subclasses should override this')
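# Illustrative sketch: a minimal in-memory provider showing how a concrete
# subclass might satisfy part of this interface. The class, attribute and key
# names below are assumptions for demonstration only.
class InMemorySecretsProvider(SecretsProvider):
    def initialize(self, tk):
        self._secrets = {}
    def close(self):
        self._secrets.clear()
    def get_v4_personal_token(self, tkctx):
        return self._secrets.get('v4_personal_token')
    def store_v4_personal_token(self, tkctx, v4_personal_token):
        self._secrets['v4_personal_token'] = v4_personal_token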
| 29.891089
| 84
| 0.638953
|
f72fb70520827d455a85f96859c23ec6801cf6f3
| 5,184
|
py
|
Python
|
tests/test_entrypoint.py
|
tdilauro/circulation-core
|
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
|
[
"Apache-2.0"
] | 1
|
2021-11-16T00:58:43.000Z
|
2021-11-16T00:58:43.000Z
|
tests/test_entrypoint.py
|
tdilauro/circulation-core
|
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
|
[
"Apache-2.0"
] | 16
|
2021-05-17T19:24:47.000Z
|
2021-12-15T13:57:34.000Z
|
tests/test_entrypoint.py
|
tdilauro/circulation-core
|
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
|
[
"Apache-2.0"
] | 1
|
2021-05-12T19:11:52.000Z
|
2021-05-12T19:11:52.000Z
|
import json
import pytest
from ..entrypoint import (
AudiobooksEntryPoint,
EbooksEntryPoint,
EntryPoint,
EverythingEntryPoint,
MediumEntryPoint,
)
from ..external_search import Filter
from ..model import Edition, Work
from ..testing import DatabaseTest
class TestEntryPoint(DatabaseTest):
def test_defaults(self):
everything, ebooks, audiobooks = EntryPoint.ENTRY_POINTS
assert EverythingEntryPoint == everything
assert EbooksEntryPoint == ebooks
assert AudiobooksEntryPoint == audiobooks
display = EntryPoint.DISPLAY_TITLES
assert "eBooks" == display[ebooks]
assert "Audiobooks" == display[audiobooks]
assert Edition.BOOK_MEDIUM == EbooksEntryPoint.INTERNAL_NAME
assert Edition.AUDIO_MEDIUM == AudiobooksEntryPoint.INTERNAL_NAME
assert "http://schema.org/CreativeWork" == everything.URI
for ep in (EbooksEntryPoint, AudiobooksEntryPoint):
assert ep.URI == Edition.medium_to_additional_type[ep.INTERNAL_NAME]
def test_no_changes(self):
# EntryPoint doesn't modify queries or search filters.
qu = self._db.query(Edition)
assert qu == EntryPoint.modify_database_query(self._db, qu)
args = dict(arg="value")
filter = object()
assert filter == EverythingEntryPoint.modify_search_filter(filter)
def test_register(self):
class Mock(object):
pass
args = [Mock, "Mock!"]
with pytest.raises(ValueError) as excinfo:
EntryPoint.register(*args)
assert "must define INTERNAL_NAME" in str(excinfo.value)
# Test successful registration.
Mock.INTERNAL_NAME = "a name"
EntryPoint.register(*args)
assert Mock in EntryPoint.ENTRY_POINTS
assert "Mock!" == EntryPoint.DISPLAY_TITLES[Mock]
assert Mock not in EntryPoint.DEFAULT_ENABLED
# Can't register twice.
with pytest.raises(ValueError) as excinfo:
EntryPoint.register(*args)
assert "Duplicate entry point internal name: a name" in str(excinfo.value)
EntryPoint.unregister(Mock)
# Test successful registration as a default-enabled entry point.
EntryPoint.register(*args, default_enabled=True)
assert Mock in EntryPoint.DEFAULT_ENABLED
# Can't register two different entry points with the same
# display name.
class Mock2(object):
INTERNAL_NAME = "mock2"
with pytest.raises(ValueError) as excinfo:
EntryPoint.register(Mock2, "Mock!")
assert "Duplicate entry point display name: Mock!" in str(excinfo.value)
EntryPoint.unregister(Mock)
assert Mock not in EntryPoint.DEFAULT_ENABLED
class TestEverythingEntryPoint(DatabaseTest):
def test_no_changes(self):
# EverythingEntryPoint doesn't modify queries or searches
# beyond the default behavior for any entry point.
assert "All" == EverythingEntryPoint.INTERNAL_NAME
qu = self._db.query(Edition)
assert qu == EntryPoint.modify_database_query(self._db, qu)
args = dict(arg="value")
filter = object()
assert filter == EverythingEntryPoint.modify_search_filter(filter)
class TestMediumEntryPoint(DatabaseTest):
def test_modify_database_query(self):
        # Create a video, and an entry point that contains videos.
work = self._work(with_license_pool=True)
work.license_pools[0].presentation_edition.medium = Edition.VIDEO_MEDIUM
class Videos(MediumEntryPoint):
INTERNAL_NAME = Edition.VIDEO_MEDIUM
qu = self._db.query(Work)
# The default entry points filter out the video.
for entrypoint in EbooksEntryPoint, AudiobooksEntryPoint:
modified = entrypoint.modify_database_query(self._db, qu)
assert [] == modified.all()
# But the video entry point includes it.
videos = Videos.modify_database_query(self._db, qu)
assert [work.id] == [x.id for x in videos]
def test_modify_search_filter(self):
class Mock(MediumEntryPoint):
INTERNAL_NAME = object()
filter = Filter(media=object())
Mock.modify_search_filter(filter)
assert [Mock.INTERNAL_NAME] == filter.media
class TestLibrary(DatabaseTest):
"""Test a Library's interaction with EntryPoints."""
def test_enabled_entrypoints(self):
l = self._default_library
setting = l.setting(EntryPoint.ENABLED_SETTING)
# When the value is not set, the default is used.
assert EntryPoint.DEFAULT_ENABLED == list(l.entrypoints)
setting.value = None
assert EntryPoint.DEFAULT_ENABLED == list(l.entrypoints)
# Names that don't correspond to registered entry points are
# ignored. Names that do are looked up.
setting.value = json.dumps(
["no such entry point", AudiobooksEntryPoint.INTERNAL_NAME]
)
assert [AudiobooksEntryPoint] == list(l.entrypoints)
# An empty list is a valid value.
setting.value = json.dumps([])
assert [] == list(l.entrypoints)
| 34.331126
| 82
| 0.673032
|
fe031d7387295bc55736d5930b5b3ce6b8825e73
| 2,768
|
py
|
Python
|
rdr_service/lib_fhir/fhirclient_3_0_0/server_tests.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 39
|
2017-10-13T19:16:27.000Z
|
2021-09-24T16:58:21.000Z
|
rdr_service/lib_fhir/fhirclient_3_0_0/server_tests.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 312
|
2017-09-08T15:42:13.000Z
|
2022-03-23T18:21:40.000Z
|
rdr_service/lib_fhir/fhirclient_3_0_0/server_tests.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 19
|
2017-09-15T13:58:00.000Z
|
2022-02-07T18:33:20.000Z
|
# -*- coding: utf-8 -*-
import io
import json
import os
import shutil
import unittest
from .models import fhirabstractbase as fabst
from . import server
class TestServer(unittest.TestCase):
def tearDown(self):
if os.path.exists('metadata'):
os.remove('metadata')
def testValidCapabilityStatement(self):
shutil.copyfile('test_metadata_valid.json', 'metadata')
mock = MockServer()
mock.get_capability()
self.assertIsNotNone(mock.auth._registration_uri)
self.assertIsNotNone(mock.auth._authorize_uri)
self.assertIsNotNone(mock.auth._token_uri)
def testStateConservation(self):
shutil.copyfile('test_metadata_valid.json', 'metadata')
mock = MockServer()
self.assertIsNotNone(mock.capabilityStatement)
fhir = server.FHIRServer(None, state=mock.state)
self.assertIsNotNone(fhir.auth._registration_uri)
self.assertIsNotNone(fhir.auth._authorize_uri)
self.assertIsNotNone(fhir.auth._token_uri)
def testInvalidCapabilityStatement(self):
shutil.copyfile('test_metadata_invalid.json', 'metadata')
mock = MockServer()
try:
mock.get_capability()
self.assertTrue(False, "Must have thrown exception")
except fabst.FHIRValidationError as e:
self.assertTrue(4 == len(e.errors))
self.assertEqual("date:", str(e.errors[0])[:5])
self.assertEqual("format:", str(e.errors[1])[:7])
self.assertEqual("rest.0:", str(e.errors[2])[:7])
self.assertEqual("operation.1:", str(e.errors[2].errors[0])[:12])
self.assertEqual("definition:", str(e.errors[2].errors[0].errors[0])[:11])
self.assertEqual("reference:", str(e.errors[2].errors[0].errors[0].errors[0])[:10])
self.assertEqual("Wrong type <class 'dict'>", str(e.errors[2].errors[0].errors[0].errors[0].errors[0])[:25])
self.assertEqual("security:", str(e.errors[2].errors[1])[:9])
self.assertEqual("service.0:", str(e.errors[2].errors[1].errors[0])[:10])
self.assertEqual("coding.0:", str(e.errors[2].errors[1].errors[0].errors[0])[:9])
self.assertEqual("Superfluous entry \"systems\"", str(e.errors[2].errors[1].errors[0].errors[0].errors[0])[:27])
self.assertEqual("Superfluous entry \"formats\"", str(e.errors[3])[:27])
class MockServer(server.FHIRServer):
""" Reads local files.
"""
def __init__(self):
super().__init__(None, base_uri='https://fhir.smarthealthit.org')
def request_json(self, path, nosign=False):
assert path
with io.open(path, encoding='utf-8') as handle:
return json.load(handle)
return None
| 36.906667
| 124
| 0.637283
|
7a0785796983332e708c1989a20811a5630348fa
| 6,861
|
py
|
Python
|
bindings/python/tests/mesh/test-py-graph.py
|
Geode-solutions/OpenGeode
|
e47621989e6fc152f529d4e1e7e3b9ef9e7d6ccc
|
[
"MIT"
] | 64
|
2019-08-02T14:31:01.000Z
|
2022-03-30T07:46:50.000Z
|
bindings/python/tests/mesh/test-py-graph.py
|
Geode-solutions/OpenGeode
|
e47621989e6fc152f529d4e1e7e3b9ef9e7d6ccc
|
[
"MIT"
] | 395
|
2019-08-02T17:15:10.000Z
|
2022-03-31T15:10:27.000Z
|
bindings/python/tests/mesh/test-py-graph.py
|
Geode-solutions/OpenGeode
|
e47621989e6fc152f529d4e1e7e3b9ef9e7d6ccc
|
[
"MIT"
] | 8
|
2019-08-19T21:32:15.000Z
|
2022-03-06T18:41:10.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 - 2021 Geode-solutions
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys, platform
if sys.version_info >= (3,8,0) and platform.system() == "Windows":
for path in [x.strip() for x in os.environ['PATH'].split(';') if x]:
os.add_dll_directory(path)
import opengeode_py_mesh as mesh
def test_create_vertices( graph, builder ):
builder.create_vertex()
if graph.nb_vertices() != 1:
raise ValueError( "[Test] Graph should have 1 vertex" )
builder.create_vertices( 3 )
if graph.nb_vertices() != 4:
raise ValueError( "[Test] Graph should have 4 vertices" )
def test_delete_vertex( graph, builder ):
to_delete = [False] * graph.nb_vertices()
to_delete[0] = True
builder.delete_vertices( to_delete )
if graph.nb_vertices() != 3:
raise ValueError( "[Test] Graph should have 3 vertices" )
if graph.nb_edges() != 2:
raise ValueError( "[Test] Graph should have 2 edges" )
edges_around_0 = graph.edges_around_vertex( 0 )
if len( edges_around_0 ) != 1:
raise ValueError( "[Test] edges_around_0 should have 1 edge" )
if edges_around_0[0].edge_id != 1:
raise ValueError( "[Test] edges_around_0 has wrong value" )
if edges_around_0[0].vertex_id != 0:
raise ValueError( "[Test] edges_around_0 has wrong value" )
edges_around_2 = graph.edges_around_vertex( 2 )
if len( edges_around_2 ) != 1:
raise ValueError( "[Test] edges_around_2 should have 1 edge" )
if edges_around_2[0].edge_id != 0:
raise ValueError( "[Test] edges_around_2 has wrong value" )
if edges_around_2[0].vertex_id != 0:
raise ValueError( "[Test] edges_around_2 has wrong value" )
def test_create_edges( graph, builder ):
builder.create_edge_with_vertices( 0, 1 )
builder.create_edge_with_vertices( 0, 2 )
builder.create_edge_with_vertices( 3, 2 )
builder.create_edge_with_vertices( 1, 2 )
if graph.nb_edges() != 4:
raise ValueError( "[Test] Graph should have 4 edges" )
answer = [ 3, 2 ]
if graph.edge_vertices( 2 ) != answer:
raise ValueError( "[Test] Wrong edge vertices" )
edges_around_0 = graph.edges_around_vertex( 0 )
if len( edges_around_0 ) != 2:
raise ValueError( "[Test] edges_around_0 should have 2 edges" )
if edges_around_0[0].edge_id != 0:
raise ValueError( "[Test] edges_around_0 has wrong value" )
if edges_around_0[0].vertex_id != 0:
raise ValueError( "[Test] edges_around_0 has wrong value" )
if edges_around_0[1].edge_id != 1:
raise ValueError( "[Test] edges_around_0 has wrong value" )
if edges_around_0[1].vertex_id != 0:
raise ValueError( "[Test] edges_around_0 has wrong value" )
edges_around_2 = graph.edges_around_vertex( 2 )
if len( edges_around_2 ) != 3:
raise ValueError( "[Test] edges_around_2 should have 3 edges" )
if edges_around_2[0].edge_id != 1:
raise ValueError( "[Test] edges_around_2 has wrong value" )
if edges_around_2[0].vertex_id != 1:
raise ValueError( "[Test] edges_around_2 has wrong value" )
if edges_around_2[1].edge_id != 2:
raise ValueError( "[Test] edges_around_2 has wrong value" )
if edges_around_2[1].vertex_id != 1:
raise ValueError( "[Test] edges_around_2 has wrong value" )
if edges_around_2[2].edge_id != 3:
raise ValueError( "[Test] edges_around_2 has wrong value" )
if edges_around_2[2].vertex_id != 1:
raise ValueError( "[Test] edges_around_2 has wrong value" )
def test_delete_edge( graph, builder ):
to_delete = [False] * graph.nb_edges()
to_delete[0] = True
builder.delete_edges( to_delete )
if graph.nb_edges() != 1:
raise ValueError( "[Test] Graph should have 1 edge" )
if graph.edge_vertex( mesh.EdgeVertex( 0, 0 ) ) != 0:
raise ValueError( "[Test] Graph edge vertex index is not correct" )
if graph.edge_vertex( mesh.EdgeVertex( 0, 1 ) ) != 1:
raise ValueError( "[Test] Graph edge vertex index is not correct" )
builder.create_edges( 10 )
builder.set_edge_vertex( mesh.EdgeVertex( 1, 0 ), 1 )
builder.set_edge_vertex( mesh.EdgeVertex( 1, 1 ), 0 )
if graph.nb_edges() != 11:
raise ValueError( "[Test] Graph should have 11 edges" )
to_delete.extend( [True] * 9 )
builder.delete_edges( to_delete )
if graph.nb_edges() != 1:
raise ValueError( "[Test] Graph should have 1 edge" )
if graph.edge_vertex( mesh.EdgeVertex( 0, 0 ) ) != 1:
raise ValueError( "[Test] Graph edge vertex index is not correct (0, 0)" )
if graph.edge_vertex( mesh.EdgeVertex( 0, 1 ) ) != 0:
raise ValueError( "[Test] Graph edge vertex index is not correct (0, 1)" )
def test_io( graph, filename ):
mesh.save_graph( graph, filename )
new_graph = mesh.load_graph( filename )
def test_clone( graph ):
graph2 = graph.clone()
if graph2.nb_vertices() != 3:
raise ValueError( "[Test] Graph2 should have 3 vertices" )
if graph2.nb_edges() != 1:
raise ValueError( "[Test] Graph2 should have 1 edge" )
def test_delete_isolated_vertices( graph, builder ):
builder.delete_isolated_vertices()
if graph.nb_vertices() != 2:
raise ValueError( "[Test] Graph should have 2 vertices" )
if graph.nb_edges() != 1:
raise ValueError( "[Test] Graph2 should have 1 edge" )
if __name__ == '__main__':
graph = mesh.Graph.create()
builder = mesh.GraphBuilder.create( graph )
test_create_vertices( graph, builder )
test_create_edges( graph, builder )
test_io( graph, "test." + graph.native_extension() )
test_delete_vertex( graph, builder )
test_delete_edge( graph, builder )
test_clone( graph )
test_delete_isolated_vertices( graph, builder )
| 43.700637
| 82
| 0.679201
|
a2c66597ae26ac804c1bcc98622206cffdfb3fd5
| 845
|
py
|
Python
|
tests/core/types/test_proof_of_space.py
|
DONG-Jason/chia-blockchain
|
27b28d62f6b315e45bc00231e007c775f07a414a
|
[
"Apache-2.0"
] | null | null | null |
tests/core/types/test_proof_of_space.py
|
DONG-Jason/chia-blockchain
|
27b28d62f6b315e45bc00231e007c775f07a414a
|
[
"Apache-2.0"
] | null | null | null |
tests/core/types/test_proof_of_space.py
|
DONG-Jason/chia-blockchain
|
27b28d62f6b315e45bc00231e007c775f07a414a
|
[
"Apache-2.0"
] | null | null | null |
from secrets import token_bytes
from src.types.proof_of_space import ProofOfSpace # pylint: disable=E0401
from src.consensus.default_constants import DEFAULT_CONSTANTS
class TestProofOfSpace:
def test_can_create_proof(self):
"""
        Tests that the chance of getting a correct proof is exactly 1/target_filter.
"""
num_trials = 50000
success_count = 0
target_filter = 2 ** DEFAULT_CONSTANTS.NUMBER_ZERO_BITS_PLOT_FILTER
for _ in range(num_trials):
challenge_hash = token_bytes(32)
plot_id = token_bytes(32)
sp_output = token_bytes(32)
if ProofOfSpace.passes_plot_filter(DEFAULT_CONSTANTS, plot_id, challenge_hash, sp_output):
success_count += 1
assert abs((success_count * target_filter / num_trials) - 1) < 0.3
| 35.208333
| 102
| 0.68284
|
e2091bb0752e813f4faed788b9d729623ceba8d1
| 11,361
|
py
|
Python
|
TorchRecModel/src/com/sparrowrecsys/offline/pytorch/DeepFM.py
|
liangzhang-lz/SparrowRecSys
|
9fe1a27d3903117e6e2b5487c0689c0bd9281473
|
[
"Apache-2.0"
] | 1
|
2021-01-23T07:16:10.000Z
|
2021-01-23T07:16:10.000Z
|
TorchRecModel/src/com/sparrowrecsys/offline/pytorch/DeepFM.py
|
liangzhang-lz/SparrowRecSys
|
9fe1a27d3903117e6e2b5487c0689c0bd9281473
|
[
"Apache-2.0"
] | null | null | null |
TorchRecModel/src/com/sparrowrecsys/offline/pytorch/DeepFM.py
|
liangzhang-lz/SparrowRecSys
|
9fe1a27d3903117e6e2b5487c0689c0bd9281473
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import roc_auc_score
import math
import warnings
warnings.filterwarnings("ignore")
def _get_train_test_df(training_path, test_path, columns2Keep):
training_df = pd.read_csv(training_path, index_col=False)
test_df = pd.read_csv(test_path, index_col=False)
training_df.fillna(0, inplace=True)
test_df.fillna(0, inplace=True)
training_feature = training_df[columns2Keep]
training_label = training_df['label']
test_feature = test_df[columns2Keep]
test_label = test_df['label']
training_feature['userRatedMovie1'] = training_feature['userRatedMovie1'].astype('int64')
test_feature['userRatedMovie1'] = test_feature['userRatedMovie1'].astype('int64')
return training_feature, training_label, test_feature, test_label
class ModelDataSet(Dataset):
# Retrieve an item in every call
def __init__(self, input_DF, label_DF, sparse_col, dense_col):
self.df = input_DF
self.dense_df = input_DF.iloc[:, dense_col].astype(np.float32)
self.sparse_df = input_DF.iloc[:, sparse_col].astype('int64')
self.label = label_DF.astype(np.float32)
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sparse_feature = torch.tensor(self.sparse_df.iloc[idx])
dense_feature = torch.tensor(self.dense_df.iloc[idx])
label = torch.tensor(self.label.iloc[idx])
return {'Feature': (sparse_feature, dense_feature), 'Label': label}
class FM(nn.Module):
"""
Input shape
- 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
Output shape
- 2D tensor with shape: ``(batch_size, 1)``.
"""
def __init__(self):
super().__init__()
def forward(self, inputs):
fm_input = inputs # (B,F,k)
square_of_sum = fm_input.sum(dim=1).pow(2) # (B,k)
sum_of_square = fm_input.pow(2).sum(dim=1) # (B,k)
cross_term = square_of_sum - sum_of_square
cross_term = 0.5 * cross_term.sum(dim=1, keepdim=True) # (B,1)
return cross_term
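# Illustrative helper (not in the original file): a quick shape check for the
# FM layer above. It assumes a batch of 4 samples, 5 fields and embedding size
# 10, purely to verify the (B, F, k) -> (B, 1) contract; call it manually.
def _fm_shape_demo():
    fm = FM()
    dummy = torch.randn(4, 5, 10)  # (batch_size, field_size, embedding_size)
    out = fm(dummy)
    assert out.shape == (4, 1)
    return out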
class DeepFM(nn.Module):
def __init__(self, sparse_col_size, dense_col_size):
# sparse_col_size: list[int]
# dense_col_size: int
super().__init__()
self.sparse_col_size = sparse_col_size
self.dense_col_size = dense_col_size
# We need same embedding size for FM layer
embedding_size = [10 for _ in range(len(sparse_col_size))]
# Embedding layer for all sparse features
sparse_embedding_list = []
for class_size, embed_size in zip(self.sparse_col_size, embedding_size):
embed_layer = nn.Embedding(class_size, embed_size, scale_grad_by_freq=True)
# init embed_layer
embed_layer.weight.data.uniform_(-1/math.sqrt(class_size), 1/math.sqrt(class_size))
sparse_embedding_list.append(embed_layer)
self.sparse_embedding_layer = nn.ModuleList(sparse_embedding_list)
# "Embedding layer" for dense feature
        self.dense_embedding = nn.Linear(dense_col_size, 10)
# 1st order linear layer
first_order_size = np.sum(sparse_col_size) + dense_col_size
self.linear_firstOrder = nn.Linear(first_order_size, 1)
# FM layer
self.fm = FM()
# total embedding size for deep layer
total_embedding_size = np.sum(embedding_size) + 10 # 10 is the embedding size of dense layer
# Deep side linear layers
self.linear1 = nn.Linear(total_embedding_size, 128)
self.linear2 = nn.Linear(128, 128)
self.linear3 = nn.Linear(128, 1) # final layer for deep side
def forward(self, sparse_feature, dense_feature):
if len(sparse_feature.shape) == 1: # 1D tensor converted to 2D tensor if batch_number == 1
sparse_feature = sparse_feature.view(1, -1)
dense_feature = dense_feature.view(1, -1)
# convert sparse feature to oneHot and Embedding
one_hot_list =[]
embedding_list=[]
for i in range(len(self.sparse_col_size)):
sparse_feature_input = sparse_feature[:, i] # batch x 1
class_size = self.sparse_col_size[i]
embedding_layer = self.sparse_embedding_layer[i]
one_hot_vec = F.one_hot(sparse_feature_input, num_classes=class_size).squeeze(1) # batch x class_number
embedding_output = embedding_layer(sparse_feature_input).squeeze(1) # batch x embedding_size
one_hot_list.append(one_hot_vec)
embedding_list.append(embedding_output)
# convert dense_feature to "embedding"
dense_feature_embedding = self.dense_embedding(dense_feature)
one_hot_list.append(dense_feature)
embedding_list.append(dense_feature_embedding)
# Prepare input for 1st order layer, FM, deep layer
sparse_one_hot = torch.cat(one_hot_list, dim=1) # B x (sum(one_hot)+10), 10 is the size of dense_embedding
sparse_embedding = torch.cat(embedding_list, dim=1) # B x (sum(embedding)+10), 10 is the size of dense_embedding
fm_embedding = torch.stack(embedding_list, dim=1) # B x field_number x 10
# linear layer
linear_logit = self.linear_firstOrder(sparse_one_hot)
# FM layer
FM_logit = self.fm(fm_embedding)
# Deep layer
deep_output = F.relu(self.linear1(sparse_embedding))
deep_output = F.relu(self.linear2(deep_output))
deep_logit = self.linear3(deep_output)
logit = linear_logit + FM_logit + deep_logit
return F.sigmoid(logit).view(-1)
class TrainEval:
def __init__(self, model, loss_fn, optim, device, train_dataloader, test_dataloader):
self.device = device
self.model = model.to(self.device)
self.optim = optim
self.loss_fn = loss_fn
self.train_dataloader = train_dataloader
self.test_dataloader = test_dataloader
        self.threshold = 0.5  # threshold for positive class
def train(self, epochs):
self.model.train()
for epoch in range(epochs):
print("==========================================================")
print("start training epoch: {}".format(epoch+1))
loss_list = []
pred_list = []
label_list = []
iteration = 1
for train_data in self.train_dataloader:
sparse_feature = train_data['Feature'][0].to(self.device)
dense_feature = train_data['Feature'][1].to(self.device)
label = train_data['Label'].to(self.device)
prediction = self.model(sparse_feature, dense_feature)
pred_list.extend(prediction.tolist())
label_list.extend(label.tolist())
cur_loss = self.loss_fn(prediction, label)
loss_list.append(cur_loss.item())
cur_loss.backward()
self.optim.step()
self.optim.zero_grad()
# logging every 20 iteration
if iteration % 20 == 0:
print("---------------------------------------------------------")
print("epoch {}/{}, cur_iteration is {}, logloss is {:.2f}"
.format(epoch+1, epochs, iteration, cur_loss.item()))
iteration += 1
# validation every epoch
training_loss, training_accuracy, training_roc_score = self._get_metric(loss_list, pred_list, label_list)
print("==========================================================")
print("Result of epoch {}".format(epoch+1))
print(f"training loss: {training_loss:.2f}, accuracy: {training_accuracy:.3f}, roc_score: {training_roc_score:.2f}")
test_loss, test_accuracy, test_roc_score = self.eval()
print(f"test loss: {test_loss:.2f}, accuracy: {test_accuracy:.3f}, roc_score: {test_roc_score:.2f}")
def eval(self):
# return logloss, accuracy, roc_score
self.model.eval()
loss_list = []
pred_list = []
label_list = []
with torch.no_grad():
for test_data in self.test_dataloader:
sparse_feature = test_data['Feature'][0].to(self.device)
dense_feature = test_data['Feature'][1].to(self.device)
label = test_data['Label'].to(self.device)
prediction = self.model(sparse_feature, dense_feature)
cur_loss = self.loss_fn(prediction, label)
loss_list.append(cur_loss.item())
pred_list.extend(prediction.tolist())
label_list.extend(label.tolist())
return self._get_metric(loss_list, pred_list, label_list)
def _get_metric(self, loss_list, pred_list, label_list):
# return logloss, accuracy, roc_score
# average logloss
avg_loss = np.mean(loss_list)
# roc_score
roc_score = roc_auc_score(label_list, pred_list)
# average accuracy
        pred_class_list = list(map(lambda x: 1 if x >= self.threshold else 0, pred_list))
correct_count = 0
for p, l in zip(pred_class_list, label_list):
if p == l:
correct_count += 1
avg_accuracy = correct_count / len(label_list)
return avg_loss, avg_accuracy, roc_score
if __name__ == "__main__":
folder_path = "/home/leon/Documents/SparrowRecSys/src/main/resources/webroot/sampledata"
training_path = folder_path + "/Pytorch_data/trainingSamples.csv"
test_path = folder_path + "/Pytorch_data/testSamples.csv"
columns2Keep = ['userId', 'userGenre1', 'userGenre2', 'userGenre3','userGenre4', 'userGenre5', 'scaleduserRatingCount',
'scaleduserAvgRating', 'scaleduserRatingStddev', 'userRatedMovie1', 'movieId', 'movieGenre1',
'movieGenre2', 'movieGenre3', 'scaledReleaseYear', 'scaledmovieRatingCount', 'scaledmovieAvgRating',
'scaledmovieRatingStddev']
training_feature, training_label, test_feature, test_label = _get_train_test_df(training_path, test_path, columns2Keep)
sparse_col = [0, 1, 2, 3, 4, 5, 9, 10, 11, 12, 13] # column_index of sparse features
sparse_col_size = [30001, 20, 20, 20, 20, 20, 1001, 1001, 20, 20, 20] # number of classes per sparse_feature
dense_col = [6, 7, 8, 14, 15, 16, 17]
training_dataset = ModelDataSet(training_feature, training_label, sparse_col, dense_col)
test_dataset = ModelDataSet(test_feature, test_label, sparse_col, dense_col)
BATCH_SIZE = 100
training_dataloader = DataLoader(training_dataset, batch_size=BATCH_SIZE, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE)
model = DeepFM(sparse_col_size, 7)
loss_fn = nn.BCELoss()
EPOCHS = 5
LR = 0.01
optimizer = optim.Adam(model.parameters(), lr=LR, weight_decay=0.001)
dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
train_eval = TrainEval(model, loss_fn, optimizer, dev, training_dataloader, test_dataloader)
train_eval.train(EPOCHS)
| 43.362595
| 128
| 0.641581
|
b97f77fca7d8665af467754dacf3a2879768a99c
| 3,999
|
py
|
Python
|
src/primaires/scripting/fonctions/joindre.py
|
vlegoff/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 14
|
2015-08-21T19:15:21.000Z
|
2017-11-26T13:59:17.000Z
|
src/primaires/scripting/fonctions/joindre.py
|
vincent-lg/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 20
|
2015-09-29T20:50:45.000Z
|
2018-06-21T12:58:30.000Z
|
src/primaires/scripting/fonctions/joindre.py
|
vlegoff/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 3
|
2015-05-02T19:42:03.000Z
|
2018-09-06T10:55:00.000Z
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la fonction joindre."""
from primaires.scripting.fonction import Fonction
from primaires.scripting.instruction import ErreurExecution
class ClasseFonction(Fonction):
"""Convertit une liste en chaîne pour un affichage agréable."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.joindre, "list", "str")
cls.ajouter_types(cls.joindre, "list", "str", "str")
@staticmethod
def joindre(liste, lien, dernier=""):
"""Retourne une chaîne repre'sentant la liste jointe.
Les exemples plus bas sont assez explicites. L'idée est de convertir une liste de valeurs en une chaîne, souvent plus agréable à regarder.
Paramètres à préciser :
* liste : la liste à joindre ;
* lien : la valeur à mettre entre chaque élément de liste ;
* dernier (optionnel) : la valeur à mettre entre en dernier.
La liste peut contenir tout et n'importe quoi, mais il est
utile d'avoir une liste de chaînes, car on n'a pas de risques
d'aléas d'affichage dans ce cas. Si par exemple vous voulez
afficher une liste de joueurs, regrouper le nom des joueurs
dans une liste que vous pourrez joindre ainsi. Ce n'est pas
une obligation, mais cela évite des confusions. Consultez
les exemples ci-dessous pour voir le fonctionnement de base
de cette fonction.
Exemples d'utilisation :
liste = liste("abricot", "poire", "pomme", "banane")
fruits = joindre(liste, " et ")
# " et " sera placé entre chaque élément de la liste :
# chaine contient donc :
# "abricot et poire et pomme et banane"
fruits = joindre(liste, ", ", " et ")
# Ici, on veut mettre ", " entre chaque élément, excepté
# le dernier lien. chaine contient donc :
# "abricot, poire, pomme et banane"
# Ce qui est d'autant plus agréable.
participants = liste("Kredh", "Anael", "Eridan")
trier participants
participants = joindre(participants, " ")
# participants contient : "Anael Eridan Kredh"
"""
if not liste:
return ""
dernier = dernier or lien
liste = [str(e) for e in liste]
chaine = lien.join(liste[:-1])
if len(liste) > 1:
chaine += dernier
chaine += liste[-1]
return chaine
| 42.094737
| 146
| 0.684421
|
5fdae903927b702b5347d9dfc078e0667ec3b449
| 254
|
py
|
Python
|
tests/conftest.py
|
datacorder/elasmanager
|
5e089a85ed9d67da303e55838f338f9df34bc9bc
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
datacorder/elasmanager
|
5e089a85ed9d67da303e55838f338f9df34bc9bc
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
datacorder/elasmanager
|
5e089a85ed9d67da303e55838f338f9df34bc9bc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dummy conftest.py for elasmanager.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
https://pytest.org/latest/plugins.html
"""
# import pytest
| 21.166667
| 60
| 0.65748
|
7590706867751a87e67fa1a2696cf75871bfe122
| 6,988
|
py
|
Python
|
ioflo/base/test/test_acting.py
|
0486/ioflo-python-multiversion
|
aecb1f5047a3207360afdb9f954834c15ebdc360
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
ioflo/base/test/test_acting.py
|
0486/ioflo-python-multiversion
|
aecb1f5047a3207360afdb9f954834c15ebdc360
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
ioflo/base/test/test_acting.py
|
0486/ioflo-python-multiversion
|
aecb1f5047a3207360afdb9f954834c15ebdc360
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Unit Test Template
"""
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import os
from ioflo.test import testing
from ioflo.aid.consoling import getConsole
console = getConsole()
from ioflo.base import acting
from ioflo.base import doing
from ioflo.base import storing
def setUpModule():
console.reinit(verbosity=console.Wordage.concise)
def tearDownModule():
pass
class BasicTestCase(testing.FrameIofloTestCase):
"""
Example TestCase
"""
def setUp(self):
"""
Call super if override so House Framer and Frame are setup correctly
"""
super(BasicTestCase, self).setUp()
def tearDown(self):
"""
Call super if override so House Framer and Frame are torn down correctly
"""
super(BasicTestCase, self).tearDown()
def testActify(self):
"""
Test the actify decorator
"""
console.terse("{0}\n".format(self.testActify.__doc__))
@acting.actify("BlueBeard")
def bearded(self, x=1, y=2):
"""
Actor action method
"""
z = x + y
return (self, z)
self.assertIn("BlueBeard", acting.Actor.Registry)
actor, inits, ioinits, parms = acting.Actor.__fetch__("BlueBeard")
self.assertIs(actor._Parametric, True)
self.assertDictEqual(actor.Inits, {})
self.assertDictEqual(actor.Ioinits, {})
self.assertDictEqual(actor.Parms, {})
self.assertDictEqual(inits, {})
self.assertDictEqual(ioinits, {})
self.assertDictEqual(parms, {})
actor = actor() # create instance
self.assertIsInstance(actor, acting.Actor )
self.assertEqual(actor.__class__.__name__, "BlueBeard")
if sys.version > '3':
self.assertEqual(actor.action.__self__.__class__.__name__, "BlueBeard")
self.assertIs(actor.action.__func__, bearded)
self.assertIs(actor.action.__self__, actor)
else:
self.assertEqual(actor.action.im_class.__name__, "BlueBeard")
self.assertIs(actor.action.im_func, bearded)
self.assertIs(actor.action.im_self, actor)
self.assertEqual(actor.action.__doc__, '\n Actor action method\n ')
self.assertEqual(actor.action.__name__, 'bearded')
me, z = actor() # perform action
self.assertIs(me, actor)
self.assertEqual(z, 3)
def testDoify(self):
"""
Test the doify decorator
"""
console.terse("{0}\n".format(self.testDoify.__doc__))
@doing.doify("BlackSmith")
def blackened(self, x=3, y=2):
"""
Doer action method
"""
z = x + y
return (self, z)
self.assertIn("BlackSmith", doing.Doer.Registry)
actor, inits, ioinits, parms = doing.Doer.__fetch__("BlackSmith")
self.assertIs(actor._Parametric, False)
self.assertDictEqual(actor.Inits, {})
self.assertDictEqual(actor.Ioinits, {})
self.assertDictEqual(actor.Parms, {})
self.assertDictEqual(inits, {})
self.assertDictEqual(ioinits, {})
self.assertDictEqual(parms, {})
actor = actor() # create instance
self.assertIsInstance(actor, doing.Doer )
self.assertEqual(actor.__class__.__name__, "BlackSmith")
if sys.version > '3':
self.assertEqual(actor.action.__self__.__class__.__name__, "BlackSmith")
self.assertIs(actor.action.__func__, blackened)
self.assertIs(actor.action.__self__, actor)
else:
self.assertEqual(actor.action.im_class.__name__, "BlackSmith")
self.assertIs(actor.action.im_func, blackened)
self.assertIs(actor.action.im_self, actor)
self.assertEqual(actor.action.__doc__, '\n Doer action method\n ')
self.assertEqual(actor.action.__name__, 'blackened')
me, z = actor() # perform action
self.assertIs(me, actor)
self.assertEqual(z, 5)
def testFrameDoer(self):
"""
Test adding a Doer to a frame and running it
"""
console.terse("{0}\n".format(self.testFrameDoer.__doc__))
@doing.doify("TestDoer")
def action(self, a="Felgercarb", **kwa):
"""
Doer action method
"""
share = self.store.create(".test.a").update(value=a)
self.assertIn("TestDoer", doing.Doer.Registry)
act = self.addDoer("TestDoer")
self.assertIsInstance(act, acting.Act)
self.assertIn(act, self.frame.reacts)
self.assertEqual(act.actor, "TestDoer")
self.assertEqual(act.frame, self.frame.name)
self.resolve() # resolve House
self.assertIs(act.frame, self.frame)
self.assertIs(act.frame.framer, self.framer)
self.assertIs(act.actor.store, self.store)
self.assertIn(act.actor.name, doing.Doer.Registry)
self.assertEqual(act.actor.name, "TestDoer")
self.assertIsInstance(act.actor, doing.Doer)
self.assertEqual(act.actor.__class__.__name__, "TestDoer")
if sys.version > '3':
self.assertEqual(act.actor.action.__self__.__class__.__name__, "TestDoer")
self.assertIs(act.actor.action.__func__, action)
self.assertIs(act.actor.action.__self__, act.actor)
else:
self.assertEqual(act.actor.action.im_class.__name__, "TestDoer")
self.assertIs(act.actor.action.im_func, action)
self.assertIs(act.actor.action.im_self, act.actor)
self.assertEqual(act.actor.action.__doc__, '\n Doer action method\n ')
self.assertEqual(act.actor.action.__name__, 'action')
self.assertIs(self.store.fetch(".test.a"), None)
self.frame.recur() # run reacts in frame
share = self.store.fetch(".test.a")
self.assertIsInstance(share, storing.Share )
self.assertEqual(share.value, "Felgercarb")
def runOne(test):
'''
Unittest Runner
'''
test = BasicTestCase(test)
suite = unittest.TestSuite([test])
unittest.TextTestRunner(verbosity=2).run(suite)
def runSome():
""" Unittest runner """
tests = []
names = ['testActify',
'testDoify',
'testFrameDoer', ]
tests.extend(map(BasicTestCase, names))
suite = unittest.TestSuite(tests)
unittest.TextTestRunner(verbosity=2).run(suite)
def runAll():
""" Unittest runner """
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(BasicTestCase))
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__' and __package__ is None:
#console.reinit(verbosity=console.Wordage.concise)
#runAll() #run all unittests
runSome()#only run some
#runOne('testBasic')
| 33.118483
| 100
| 0.617344
|
77ca77a57e941d36e2df2e0c83ee30f98b3aefd9
| 13,594
|
py
|
Python
|
tests/test_util.py
|
RossK1/exchangelib
|
5550c2fbcc064943e3b4e150f74a724e0bd0a9f3
|
[
"BSD-2-Clause"
] | 1,006
|
2016-07-18T16:42:55.000Z
|
2022-03-31T10:43:50.000Z
|
tests/test_util.py
|
RossK1/exchangelib
|
5550c2fbcc064943e3b4e150f74a724e0bd0a9f3
|
[
"BSD-2-Clause"
] | 966
|
2016-05-13T18:55:43.000Z
|
2022-03-31T15:24:56.000Z
|
tests/test_util.py
|
RossK1/exchangelib
|
5550c2fbcc064943e3b4e150f74a724e0bd0a9f3
|
[
"BSD-2-Clause"
] | 272
|
2016-04-05T02:17:10.000Z
|
2022-03-24T08:15:57.000Z
|
import io
from itertools import chain
import logging
import requests
import requests_mock
from exchangelib.errors import RelativeRedirect, TransportError, RateLimitError, RedirectError, UnauthorizedError,\
CASError
from exchangelib.protocol import FailFast, FaultTolerance
import exchangelib.util
from exchangelib.util import chunkify, peek, get_redirect_url, get_domain, PrettyXmlHandler, to_xml, BOM_UTF8, \
ParseError, post_ratelimited, safe_b64decode, CONNECTION_ERRORS
from .common import EWSTest, mock_post, mock_session_exception
class UtilTest(EWSTest):
def test_chunkify(self):
# Test tuple, list, set, range, map, chain and generator
seq = [1, 2, 3, 4, 5]
self.assertEqual(list(chunkify(seq, chunksize=2)), [[1, 2], [3, 4], [5]])
seq = (1, 2, 3, 4, 6, 7, 9)
self.assertEqual(list(chunkify(seq, chunksize=3)), [(1, 2, 3), (4, 6, 7), (9,)])
seq = {1, 2, 3, 4, 5}
self.assertEqual(list(chunkify(seq, chunksize=2)), [[1, 2], [3, 4], [5]])
seq = range(5)
self.assertEqual(list(chunkify(seq, chunksize=2)), [range(0, 2), range(2, 4), range(4, 5)])
seq = map(int, range(5))
self.assertEqual(list(chunkify(seq, chunksize=2)), [[0, 1], [2, 3], [4]])
seq = chain(*[[i] for i in range(5)])
self.assertEqual(list(chunkify(seq, chunksize=2)), [[0, 1], [2, 3], [4]])
seq = (i for i in range(5))
self.assertEqual(list(chunkify(seq, chunksize=2)), [[0, 1], [2, 3], [4]])
def test_peek(self):
# Test peeking into various sequence types
# tuple
is_empty, seq = peek(())
self.assertEqual((is_empty, list(seq)), (True, []))
is_empty, seq = peek((1, 2, 3))
self.assertEqual((is_empty, list(seq)), (False, [1, 2, 3]))
# list
is_empty, seq = peek([])
self.assertEqual((is_empty, list(seq)), (True, []))
is_empty, seq = peek([1, 2, 3])
self.assertEqual((is_empty, list(seq)), (False, [1, 2, 3]))
# set
is_empty, seq = peek(set())
self.assertEqual((is_empty, list(seq)), (True, []))
is_empty, seq = peek({1, 2, 3})
self.assertEqual((is_empty, list(seq)), (False, [1, 2, 3]))
# range
is_empty, seq = peek(range(0))
self.assertEqual((is_empty, list(seq)), (True, []))
is_empty, seq = peek(range(1, 4))
self.assertEqual((is_empty, list(seq)), (False, [1, 2, 3]))
# map
is_empty, seq = peek(map(int, []))
self.assertEqual((is_empty, list(seq)), (True, []))
is_empty, seq = peek(map(int, [1, 2, 3]))
self.assertEqual((is_empty, list(seq)), (False, [1, 2, 3]))
# generator
is_empty, seq = peek((i for i in []))
self.assertEqual((is_empty, list(seq)), (True, []))
is_empty, seq = peek((i for i in [1, 2, 3]))
self.assertEqual((is_empty, list(seq)), (False, [1, 2, 3]))
@requests_mock.mock()
def test_get_redirect_url(self, m):
m.get('https://httpbin.org/redirect-to', status_code=302, headers={'location': 'https://example.com/'})
r = requests.get('https://httpbin.org/redirect-to?url=https://example.com/', allow_redirects=False)
self.assertEqual(get_redirect_url(r), 'https://example.com/')
m.get('https://httpbin.org/redirect-to', status_code=302, headers={'location': 'http://example.com/'})
r = requests.get('https://httpbin.org/redirect-to?url=http://example.com/', allow_redirects=False)
self.assertEqual(get_redirect_url(r), 'http://example.com/')
m.get('https://httpbin.org/redirect-to', status_code=302, headers={'location': '/example'})
r = requests.get('https://httpbin.org/redirect-to?url=/example', allow_redirects=False)
self.assertEqual(get_redirect_url(r), 'https://httpbin.org/example')
m.get('https://httpbin.org/redirect-to', status_code=302, headers={'location': 'https://example.com'})
with self.assertRaises(RelativeRedirect):
r = requests.get('https://httpbin.org/redirect-to?url=https://example.com', allow_redirects=False)
get_redirect_url(r, require_relative=True)
m.get('https://httpbin.org/redirect-to', status_code=302, headers={'location': '/example'})
with self.assertRaises(RelativeRedirect):
r = requests.get('https://httpbin.org/redirect-to?url=/example', allow_redirects=False)
get_redirect_url(r, allow_relative=False)
def test_to_xml(self):
to_xml(b'<?xml version="1.0" encoding="UTF-8"?><foo></foo>')
to_xml(BOM_UTF8+b'<?xml version="1.0" encoding="UTF-8"?><foo></foo>')
to_xml(BOM_UTF8+b'<?xml version="1.0" encoding="UTF-8"?><foo>&broken</foo>')
with self.assertRaises(ParseError):
to_xml(b'foo')
try:
to_xml(b'<t:Foo><t:Bar>Baz</t:Bar></t:Foo>')
except ParseError as e:
# Not all lxml versions throw an error here, so we can't use assertRaises
self.assertIn('Offending text: [...]<t:Foo><t:Bar>Baz</t[...]', e.args[0])
def test_get_domain(self):
self.assertEqual(get_domain('foo@example.com'), 'example.com')
with self.assertRaises(ValueError):
get_domain('blah')
def test_pretty_xml_handler(self):
# Test that a normal, non-XML log record is passed through unchanged
stream = io.StringIO()
stream.isatty = lambda: True
h = PrettyXmlHandler(stream=stream)
self.assertTrue(h.is_tty())
r = logging.LogRecord(
name='baz', level=logging.INFO, pathname='/foo/bar', lineno=1, msg='hello', args=(), exc_info=None
)
h.emit(r)
h.stream.seek(0)
self.assertEqual(h.stream.read(), 'hello\n')
# Test formatting of an XML record. It should contain newlines and color codes.
stream = io.StringIO()
stream.isatty = lambda: True
h = PrettyXmlHandler(stream=stream)
r = logging.LogRecord(
name='baz', level=logging.DEBUG, pathname='/foo/bar', lineno=1, msg='hello %(xml_foo)s',
args=({'xml_foo': b'<?xml version="1.0" encoding="UTF-8"?><foo>bar</foo>'},), exc_info=None)
h.emit(r)
h.stream.seek(0)
self.assertEqual(
h.stream.read(),
"hello \x1b[36m<?xml version='1.0' encoding='utf-8'?>\x1b[39;49;00m\n\x1b[94m"
"<foo\x1b[39;49;00m\x1b[94m>\x1b[39;49;00mbar\x1b[94m</foo>\x1b[39;49;00m\n"
)
def test_post_ratelimited(self):
url = 'https://example.com'
protocol = self.account.protocol
orig_policy = protocol.config.retry_policy
RETRY_WAIT = exchangelib.util.RETRY_WAIT
MAX_REDIRECTS = exchangelib.util.MAX_REDIRECTS
session = protocol.get_session()
try:
# Make sure we fail fast in error cases
protocol.config.retry_policy = FailFast()
# Test the straight, HTTP 200 path
session.post = mock_post(url, 200, {}, 'foo')
r, session = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='')
self.assertEqual(r.content, b'foo')
# Test exceptions raises by the POST request
for err_cls in CONNECTION_ERRORS:
session.post = mock_session_exception(err_cls)
with self.assertRaises(err_cls):
r, session = post_ratelimited(
protocol=protocol, session=session, url='http://', headers=None, data='')
# Test bad exit codes and headers
session.post = mock_post(url, 401, {})
with self.assertRaises(UnauthorizedError):
r, session = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='')
session.post = mock_post(url, 999, {'connection': 'close'})
with self.assertRaises(TransportError):
r, session = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='')
session.post = mock_post(url, 302,
{'location': '/ews/genericerrorpage.htm?aspxerrorpath=/ews/exchange.asmx'})
with self.assertRaises(TransportError):
r, session = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='')
session.post = mock_post(url, 503, {})
with self.assertRaises(TransportError):
r, session = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='')
# No redirect header
session.post = mock_post(url, 302, {})
with self.assertRaises(TransportError):
r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='')
# Redirect header to same location
session.post = mock_post(url, 302, {'location': url})
with self.assertRaises(TransportError):
r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='')
# Redirect header to relative location
session.post = mock_post(url, 302, {'location': url + '/foo'})
with self.assertRaises(RedirectError):
r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='')
# Redirect header to other location and allow_redirects=False
session.post = mock_post(url, 302, {'location': 'https://contoso.com'})
with self.assertRaises(TransportError):
r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='')
# Redirect header to other location and allow_redirects=True
exchangelib.util.MAX_REDIRECTS = 0
session.post = mock_post(url, 302, {'location': 'https://contoso.com'})
with self.assertRaises(TransportError):
r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='',
allow_redirects=True)
# CAS error
session.post = mock_post(url, 999, {'X-CasErrorCode': 'AAARGH!'})
with self.assertRaises(CASError):
r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='')
# Allow XML data in a non-HTTP 200 response
session.post = mock_post(url, 500, {}, '<?xml version="1.0" ?><foo></foo>')
r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='')
self.assertEqual(r.content, b'<?xml version="1.0" ?><foo></foo>')
# Bad status_code and bad text
session.post = mock_post(url, 999, {})
with self.assertRaises(TransportError):
r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='')
# Test rate limit exceeded
exchangelib.util.RETRY_WAIT = 1
protocol.config.retry_policy = FaultTolerance(max_wait=0.5) # Fail after first RETRY_WAIT
session.post = mock_post(url, 503, {'connection': 'close'})
# Mock renew_session to return the same session so the session object's 'post' method is still mocked
protocol.renew_session = lambda s: s
with self.assertRaises(RateLimitError) as rle:
r, session = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='')
self.assertEqual(rle.exception.status_code, 503)
self.assertEqual(rle.exception.url, url)
self.assertTrue(1 <= rle.exception.total_wait < 2) # One RETRY_WAIT plus some overhead
# Test something larger than the default wait, so we retry at least once
protocol.retry_policy.max_wait = 3 # Fail after second RETRY_WAIT
session.post = mock_post(url, 503, {'connection': 'close'})
with self.assertRaises(RateLimitError) as rle:
r, session = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='')
self.assertEqual(rle.exception.status_code, 503)
self.assertEqual(rle.exception.url, url)
# We double the wait for each retry, so this is RETRY_WAIT + 2*RETRY_WAIT plus some overhead
self.assertTrue(3 <= rle.exception.total_wait < 4, rle.exception.total_wait)
finally:
protocol.retire_session(session) # We have patched the session, so discard it
# Restore patched attributes and functions
protocol.config.retry_policy = orig_policy
exchangelib.util.RETRY_WAIT = RETRY_WAIT
exchangelib.util.MAX_REDIRECTS = MAX_REDIRECTS
try:
delattr(protocol, 'renew_session')
except AttributeError:
pass
def test_safe_b64decode(self):
# Test correctly padded string
self.assertEqual(safe_b64decode('SGVsbG8gd29ybGQ='), b'Hello world')
# Test incorrectly padded string
self.assertEqual(safe_b64decode('SGVsbG8gd29ybGQ'), b'Hello world')
# Test binary data
self.assertEqual(safe_b64decode(b'SGVsbG8gd29ybGQ='), b'Hello world')
# Test incorrectly padded binary data
self.assertEqual(safe_b64decode(b'SGVsbG8gd29ybGQ'), b'Hello world')
| 50.348148
| 119
| 0.612182
|
53131daa8982e591f9583c5f7c4ee47745ff3d1c
| 1,812
|
py
|
Python
|
model_compression_toolkit/core/common/network_editors/edit_network.py
|
reuvenperetz/model_optimization
|
40de02d56750ee4cc20e693da63bc2e70b4d20e6
|
[
"Apache-2.0"
] | 42
|
2021-10-31T10:17:49.000Z
|
2022-03-21T08:51:46.000Z
|
model_compression_toolkit/core/common/network_editors/edit_network.py
|
reuvenperetz/model_optimization
|
40de02d56750ee4cc20e693da63bc2e70b4d20e6
|
[
"Apache-2.0"
] | 6
|
2021-10-31T15:06:03.000Z
|
2022-03-31T10:32:53.000Z
|
model_compression_toolkit/core/common/network_editors/edit_network.py
|
reuvenperetz/model_optimization
|
40de02d56750ee4cc20e693da63bc2e70b4d20e6
|
[
"Apache-2.0"
] | 18
|
2021-11-01T12:16:43.000Z
|
2022-03-25T16:52:37.000Z
|
# Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import copy
from typing import List
from model_compression_toolkit.core.common.framework_info import FrameworkInfo
from model_compression_toolkit.core.common.graph.base_graph import Graph
from model_compression_toolkit.core.common.network_editors import EditRule
def edit_network_graph(graph: Graph,
fw_info: FrameworkInfo,
network_editor: List[EditRule]):
"""
Apply a list of edit rules on a graph.
Args:
graph_to_edit: The graph to edit.
fw_info: Information needed for quantization about the specific framework (e.g., kernel channels indices,
groups of layers by how they should be quantized, etc.)
network_editor: List of edit rules to apply to the graph.
Returns:
The graph after it has been applied the edit rules from the network editor list.
"""
# graph = copy.deepcopy(graph_to_edit)
for edit_rule in network_editor:
filtered_nodes = graph.filter(edit_rule.filter)
for node in filtered_nodes:
edit_rule.action.apply(node, graph, fw_info)
# return graph
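# Usage sketch (hypothetical, not from the toolkit's docs): apply previously
# constructed EditRule objects to a graph in place. The graph, fw_info and
# rules arguments are assumed to have been built with the toolkit's own APIs.
def _apply_rules_example(graph, fw_info, rules):
    # edit_network_graph mutates `graph` and returns nothing.
    edit_network_graph(graph, fw_info, network_editor=rules)
    return graph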
| 40.266667
| 113
| 0.695916
|
e591f79c17c9db60e0aabe45337e6f856eeb220b
| 531
|
py
|
Python
|
server/sickbeats/orm/db_mixins.py
|
byronmejia/sick-beats
|
22a38d38587f84e534b004c0e74dae51edfbadd1
|
[
"MIT"
] | 1
|
2018-03-28T00:28:29.000Z
|
2018-03-28T00:28:29.000Z
|
server/sickbeats/orm/db_mixins.py
|
byronmejia/sick-beats
|
22a38d38587f84e534b004c0e74dae51edfbadd1
|
[
"MIT"
] | 13
|
2017-12-27T02:54:48.000Z
|
2018-07-09T23:14:41.000Z
|
server/sickbeats/orm/db_mixins.py
|
byronmejia/sick-beats
|
22a38d38587f84e534b004c0e74dae51edfbadd1
|
[
"MIT"
] | 4
|
2017-12-27T05:46:51.000Z
|
2018-07-01T04:57:42.000Z
|
from passlib.hash import pbkdf2_sha512
import uuid
from sickbeats.app import db
class IDMixin(object):
id = db.Column(db.Integer, primary_key=True, autoincrement=True, nullable=False)
class HashedPasswordMixin(object):
password = db.Column(db.String, nullable=False)
salt = db.Column(db.String, nullable=False)
@classmethod
def generate_salt(cls):
return uuid.uuid4().hex
@classmethod
def hash_password(cls, input_password, salt):
return pbkdf2_sha512.hash(input_password + salt)
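    # Illustrative addition (not in the original mixin): verify a candidate
    # password against the stored pbkdf2_sha512 hash, assuming `self.password`
    # holds the hash and `self.salt` the salt used when it was created.
    def verify_password(self, input_password):
        return pbkdf2_sha512.verify(input_password + self.salt, self.password)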
| 24.136364
| 84
| 0.728814
|
f2b58ad5d7db933a5c983a5aaca7c66b68adaa66
| 1,796
|
py
|
Python
|
test/chemistry/test_driver_pyquante.py
|
stefan-woerner/aqua
|
12e1b867e254977d9c5992612a7919d8fe016cb4
|
[
"Apache-2.0"
] | 504
|
2018-12-15T16:34:03.000Z
|
2022-03-26T11:24:53.000Z
|
test/chemistry/test_driver_pyquante.py
|
stefan-woerner/aqua
|
12e1b867e254977d9c5992612a7919d8fe016cb4
|
[
"Apache-2.0"
] | 746
|
2018-12-16T16:44:42.000Z
|
2021-07-10T16:59:43.000Z
|
test/chemistry/test_driver_pyquante.py
|
stefan-woerner/aqua
|
12e1b867e254977d9c5992612a7919d8fe016cb4
|
[
"Apache-2.0"
] | 421
|
2018-12-22T14:49:00.000Z
|
2022-03-04T09:47:07.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Driver PyQuante """
import unittest
from test.chemistry import QiskitChemistryTestCase
from test.chemistry.test_driver import TestDriver
from qiskit.chemistry.drivers import PyQuanteDriver, UnitsType, BasisType
from qiskit.chemistry import QiskitChemistryError
class TestDriverPyQuante(QiskitChemistryTestCase, TestDriver):
"""PYQUANTE Driver tests."""
def setUp(self):
super().setUp()
try:
driver = PyQuanteDriver(atoms='H .0 .0 .0; H .0 .0 0.735',
units=UnitsType.ANGSTROM,
charge=0,
multiplicity=1,
basis=BasisType.BSTO3G)
except QiskitChemistryError:
self.skipTest('PYQUANTE driver does not appear to be installed')
self.qmolecule = driver.run()
class TestDriverPyQuanteMolecule(QiskitChemistryTestCase, TestDriver):
"""PYQUANTE Driver molecule tests."""
def setUp(self):
super().setUp()
try:
driver = PyQuanteDriver(molecule=TestDriver.MOLECULE)
except QiskitChemistryError:
self.skipTest('PYQUANTE driver does not appear to be installed')
self.qmolecule = driver.run()
if __name__ == '__main__':
unittest.main()
| 34.538462
| 77
| 0.660913
|
326816f7d19af21ae1da60ca5eac4191e7de4032
| 1,113
|
py
|
Python
|
main/models/customfieldenumgroup.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
main/models/customfieldenumgroup.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
main/models/customfieldenumgroup.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# CustomFieldEnumGroup model
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.model.base import NOCModel
from noc.core.model.decorator import on_delete_check
@on_delete_check(
check=[("main.CustomField", "enum_group"), ("main.CustomFieldEnumValue", "enum_group")]
)
class CustomFieldEnumGroup(NOCModel):
"""
Enumeration groups for custom fields
"""
class Meta(object):
verbose_name = "Enum Group"
verbose_name_plural = "Enum Groups"
db_table = "main_customfieldenumgroup"
app_label = "main"
name = models.CharField("Name", max_length=128, unique=True)
is_active = models.BooleanField("Is Active", default=True)
description = models.TextField("Description", null=True, blank=True)
def __str__(self):
return self.name
| 30.916667
| 91
| 0.571429
|
4d259a4fe94cc67a617bfe7d0e34f7166fd829b6
| 5,319
|
py
|
Python
|
src/ged_pred/gp_data_processing.py
|
hwwang55/MolR
|
aaa968c23bea5ed9209e29ab0d21ca9cc04c91bf
|
[
"MIT"
] | 24
|
2021-09-26T00:46:18.000Z
|
2022-03-28T12:12:34.000Z
|
src/ged_pred/gp_data_processing.py
|
tiger-tiger/MolR
|
9c0ee2b30ae6390cef37e15cccece53274d61860
|
[
"MIT"
] | null | null | null |
src/ged_pred/gp_data_processing.py
|
tiger-tiger/MolR
|
9c0ee2b30ae6390cef37e15cccece53274d61860
|
[
"MIT"
] | 6
|
2021-11-16T06:13:27.000Z
|
2022-02-22T12:10:17.000Z
|
import os
import random
import dgl
import torch
import pickle
import pysmiles
import itertools
import multiprocessing as mp
from data_processing import networkx_to_dgl
from networkx.algorithms.similarity import graph_edit_distance
random.seed(0)
class GEDPredDataset(dgl.data.DGLDataset):
def __init__(self, args):
self.args = args
self.path = '../data/' + args.dataset + '/'
self.graphs1 = []
self.graphs2 = []
self.targets = []
super().__init__(name='ged_pred_' + args.dataset)
def to_gpu(self):
if torch.cuda.is_available():
print('moving ' + self.args.dataset + ' dataset to GPU')
self.graphs1 = [graph.to('cuda:' + str(self.args.gpu)) for graph in self.graphs1]
self.graphs2 = [graph.to('cuda:' + str(self.args.gpu)) for graph in self.graphs2]
def save(self):
print('saving ' + self.args.dataset + ' dataset to ' + self.path + 'ged0.bin and ' + self.path + 'ged1.bin')
dgl.save_graphs(self.path + 'ged0.bin', self.graphs1, {'target': self.targets})
dgl.save_graphs(self.path + 'ged1.bin', self.graphs2)
def load(self):
print('loading ' + self.args.dataset + ' dataset from ' + self.path + 'ged0.bin and ' + self.path + 'ged1.bin')
self.graphs1, self.targets = dgl.load_graphs(self.path + 'ged0.bin')
self.graphs2, _ = dgl.load_graphs(self.path + 'ged1.bin')
self.targets = self.targets['target']
self.to_gpu()
def process(self):
print('loading feature encoder from ../saved/' + self.args.pretrained_model + '/feature_enc.pkl')
with open('../saved/' + self.args.pretrained_model + '/feature_enc.pkl', 'rb') as f:
feature_encoder = pickle.load(f)
molecule_list = self.get_molecule_list()
samples = self.sample(molecule_list)
res = calculate_ged_with_mp(samples, self.args.n_pairs)
with open(self.path + 'pairwise_ged.csv', 'w') as f:
f.writelines('smiles1,smiles2,ged\n')
for g1, g2, s1, s2, ged in res:
self.graphs1.append(networkx_to_dgl(g1, feature_encoder))
self.graphs2.append(networkx_to_dgl(g2, feature_encoder))
self.targets.append(ged)
f.writelines(s1 + ',' + s2 + ',' + str(ged) + '\n')
self.targets = torch.Tensor(self.targets)
self.to_gpu()
def has_cache(self):
if os.path.exists(self.path + 'ged0.bin') and os.path.exists(self.path + 'ged1.bin'):
print('cache found')
return True
else:
print('cache not found')
return False
def __getitem__(self, i):
return self.graphs1[i], self.graphs2[i], self.targets[i]
def __len__(self):
return len(self.graphs1)
def get_molecule_list(self):
print('retrieving the first %d molecules from %s dataset' % (self.args.n_molecules, self.args.dataset))
molecule_list = []
with open(self.path + self.args.dataset + '.csv') as f:
for idx, line in enumerate(f.readlines()):
if idx == 0 or line == '\n':
continue
if idx > self.args.n_molecules:
break
items = line.strip().split(',')
if self.args.dataset == 'QM9':
smiles = items[1]
else:
raise ValueError('unknown dataset')
raw_graph = pysmiles.read_smiles(smiles, zero_order_bonds=False)
molecule_list.append((raw_graph, smiles))
return molecule_list
def sample(self, molecule_list):
print('sampling %d pairs' % self.args.n_pairs)
all_pairs = list(itertools.combinations(molecule_list, 2))
samples = random.sample(all_pairs, self.args.n_pairs)
return samples
def calculate_ged_with_mp(samples, n_pairs):
print('calculating GED using multiprocessing')
n_cores, pool, range_list = get_params_for_mp(n_pairs)
res = pool.map(calculate_ged, zip([samples[i[0]: i[1]] for i in range_list], range(n_cores)))
print('gathering results')
res = [i for sublist in res for i in sublist]
return res
def get_params_for_mp(n_pairs):
n_cores = mp.cpu_count()
pool = mp.Pool(n_cores)
avg = n_pairs // n_cores
range_list = []
start = 0
for i in range(n_cores):
num = avg + 1 if i < n_pairs - avg * n_cores else avg
range_list.append([start, start + num])
start += num
return n_cores, pool, range_list
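# Worked example of the split above (numbers are illustrative): with n_pairs=10
# and n_cores=4, avg is 2 and the first 10 - 2*4 = 2 workers receive one extra
# pair, so range_list becomes [[0, 3], [3, 6], [6, 8], [8, 10]] and every pair
# is handed to exactly one worker.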
def calculate_ged(inputs):
def node_match(n1, n2):
return n1['element'] == n2['element'] and n1['charge'] == n2['charge']
def edge_match(e1, e2):
return e1['order'] == e2['order']
res = []
samples, pid = inputs
for i, graph_pair in enumerate(samples):
g1, g2 = graph_pair
graph1, smiles1 = g1
graph2, smiles2 = g2
ged = graph_edit_distance(graph1, graph2, node_match=node_match, edge_match=edge_match)
res.append((graph1, graph2, smiles1, smiles2, ged))
if i % 100 == 0:
print('pid %d: %d / %d' % (pid, i, len(samples)))
print('pid %d done' % pid)
return res
def load_data(args):
data = GEDPredDataset(args)
return data
| 35.939189
| 119
| 0.602369
|
1da74b84cc031dd042448f06d64b9b2e06a9c79c
| 5,650
|
py
|
Python
|
entities.py
|
ananlvjiao/yi_noob
|
61cdb8ef31bce0bb6c8100bc35c516b06982c0be
|
[
"MIT"
] | null | null | null |
entities.py
|
ananlvjiao/yi_noob
|
61cdb8ef31bce0bb6c8100bc35c516b06982c0be
|
[
"MIT"
] | null | null | null |
entities.py
|
ananlvjiao/yi_noob
|
61cdb8ef31bce0bb6c8100bc35c516b06982c0be
|
[
"MIT"
] | null | null | null |
from enum import Enum, unique
@unique
class Element(Enum):
Wood = 0
Fire = 1
Earth = 2
Metal = 3
Water = 4
# which elem overcome me
def ke_wo(self):
return Element((self.value-2)%5)
# which elem generate me
def sheng_wo(self):
return Element((self.value-1)%5)
# which elem I overcome
def wo_ke(self):
return Element((self.value+2)%5)
# which elem I generate
def wo_sheng(self):
return Element((self.value+1)%5)
class Yao(Enum):
LaoYin = 0
ShaoYin = 1
ShaoYang = 2
LaoYang = 3
def dong(self):
if not self.is_dong_able():
return self
else:
return Yao.ShaoYang if self is Yao.LaoYin else Yao.ShaoYin
def is_dong_able(self):
return self is Yao.LaoYang or self is Yao.LaoYin
def val(self):
return int(self.value >= 2)
class BaGua(Enum):
Qian = 8, '乾'
Dui = 7, '兑'
Li = 6, '离'
Zhen = 5, '震'
Xun = 4, '巽'
Kan = 3, '坎'
Gen = 2, '艮'
Kun = 1, '坤'
def __init__(self, val, fname):
self._value_= val
self.full_name = fname
def display(self):
bit_arr = '{0:03b}'.format(self._value_-1)
yang_str = '-'*8
yin_str = '-'*3+' '*2+'-'*3
desc = "\n".join(yang_str if int(v)==1 else yin_str for v in bit_arr)
return desc
@classmethod
def Init(cls, gua):
val = 0
for bit in gua:
val = (val << 1) | bit
for m in cls:
if m._value_ == val+1:
return m
return None
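# Example of the encoding above (values follow from the enum definition):
# BaGua.Qian has value 8, so display() renders '{0:03b}'.format(7) == '111' as
# three solid (yang) lines; BaGua.Init([1, 1, 1]) folds those bits back into 7
# and returns BaGua.Qian, while BaGua.Init([0, 0, 0]) yields BaGua.Kun.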
# the symbol stores the integer mapping to Unicode 4 Yijing Symbols
# the sequence of the symbols is based on the Manifested Hexagrams
# Ref https://www.unicode.org/charts/PDF/U4DC0.pdf
class Hexagrams(Enum):
Kun1 = 1, 19905, '坤为地', '坤'
Bo = 2, 19926, '山地剥', '剥'
Bi3 = 3, 19911, '水地比', '比'
Guan = 4, 19923, '风地观', '观'
Yu = 5, 19919, '雷地豫', '豫'
Jin = 6, 19938, '火地晋', '晋'
Cui = 7, 19948, '泽地萃', '萃'
Pi = 8, 19915, '天地否', '否'
Qian1 = 9, 19918, '地山谦', '谦'
Gen = 10, 19955, '艮为山', '艮'
Jian3 = 11, 19942, '水山蹇', '蹇'
Jian4 = 12, 19956, '风山渐', '渐'
XiaoGuo = 13, 19965, '雷山小过', '小过'
Lu = 14, 19959, '火山旅', '旅'
Xian = 15, 19934, '泽山咸', '咸'
Dun = 16, 19936, '天山遁', '遁'
Shi = 17, 19910, '地水师', '师'
Meng = 18, 19907, '山水蒙', '蒙'
Kan = 19, 19932, '坎为水', '坎'
Huan = 20, 19962, '风水涣', '涣'
Xie = 21, 19943, '雷水解', '解'
WeiJi = 22, 19967, '火水未济', '未济'
Kun4 = 23, 19950, '泽水困', '困'
Song = 24, 19909, '天水讼', '讼'
Sheng = 25, 19949, '地风升', '升'
Gu = 26, 19921, '山风蛊', '蛊'
Jing = 27, 19951, '水风井', '井'
Xun = 28, 19960, '巽为风', '巽'
Heng = 29, 19935, '雷风恒', '恒'
Ding = 30, 19953, '火风鼎', '鼎'
DaGuo = 31, 19931, '泽风大过', '大过'
Gou = 32, 19947, '天风媾', '媾'
Fu = 33, 19927, '地雷复', '复'
Yi2 = 34, 19930, '山雷颐', '颐'
Zhun = 35, 19906, '水雷屯', '屯'
Yi4 = 36, 19945, '风雷益', '益'
Zhen = 37, 19954, '震为雷', '震'
ShiHe = 38, 19924, '火雷噬嗑', '噬嗑'
Sui = 39, 19920, '泽雷随', '随'
WuWang = 40, 19928, '天雷无妄', '无妄'
MingYi = 41, 19939, '地火明夷', '明夷'
Bi4 = 42, 19925, '山火贲', '贲'
JiJi = 43, 19966, '水火既济', '既济'
JiaRen = 44, 19940, '风火家人', '家人'
Feng = 45, 19958, '雷火丰', '丰'
Li = 46, 19933, '离为火', '离'
Ge = 47, 19952, '泽火革', '革'
TongRen = 48, 19916, '天火同人', '同人'
Lin = 49, 19922, '地泽临', '临'
Sun = 50, 19944, '山泽损', '损'
Jie = 51, 19963, '水泽节', '节'
ZhongFu = 52, 19964, '风泽中孚', '中孚'
GuiMei = 53, 19957, '雷泽归妹', '归妹'
Kui = 54, 19941, '火泽睽', '睽'
Dui = 55, 19961, '兑为泽', '兑'
Lv = 56, 19913, '天泽履', '履'
Tai = 57, 19914, '地天泰', '泰'
DaXu = 58, 19929, '山天大畜', '大畜'
Xu = 59, 19908, '水天需', '需'
XiaoXu = 60, 19912, '风天小畜', '小畜'
DaZhuang = 61, 19937, '雷天大壮', '大壮'
DaYou = 62, 19917, '火天大有', '大有'
Guai = 63, 19946, '泽天夬', '夬'
Qian2 = 64, 19904, '乾为天', '乾'
def __init__(self, val, ucode, fname, sname):
self._value_= val
self.unicode = ucode
self.full_name = fname
self.short_name = sname
def display(self):
desc = '{fname}:{symbol}'.format(
fname = self.full_name, symbol = chr(self.unicode))
return desc
@classmethod
def Init(cls, gua):
val = 0
for bit in gua:
val = (val << 1) | bit
for m in cls:
if m._value_ == val+1:
return m
return None
class ZhuangGua:
    def __init__(self, matrix):
        # keep the drawn gua per instance; a class-level list would be shared across instances
        self._gua = []
if matrix is not None:
if len(matrix)== 6:
for row in matrix:
if len(row) == 3:
# using the reversed order to pai gua
self._gua.insert(0, (Yao(row.count(1)).val()))
else:
raise TypeError()
else:
raise TypeError()
else:
raise TypeError()
def pai_gua(self):
print(self._gua)
wai_gua = BaGua.Init(self._gua[0:3])
nei_gua = BaGua.Init(self._gua[3:6])
hexagram = Hexagrams.Init(self._gua)
print(hexagram.display())
print(wai_gua.display())
print(nei_gua.display())
matrix = [[1,1,1],[1,1,1],[1,1,1],[1,1,1],[1,1,1],[1,1,1]]
zg = ZhuangGua(matrix)
zg.pai_gua()
print(Element.Wood.ke_wo().name)
print(Element.Wood.wo_ke().name)
print(Element.Wood.sheng_wo().name)
print(Element.Wood.wo_sheng().name)
print(Yao.ShaoYin)
print(Yao.ShaoYin.val())
print(Yao.ShaoYin.dong())
print(Yao.LaoYin.dong().name)
print(Yao.LaoYin.dong().val())
| 27.294686
| 77
| 0.508496
|
bbf975e1b02ccb6e613da4b6f136909426d2d4a4
| 2,541
|
py
|
Python
|
gem_metrics/questeval.py
|
ndaheim/GEM-metrics
|
cfa7a3223d94a7f7ef5fda1d2928e2510b0cbede
|
[
"MIT"
] | 30
|
2021-02-06T04:58:14.000Z
|
2022-03-04T11:26:14.000Z
|
gem_metrics/questeval.py
|
ndaheim/GEM-metrics
|
cfa7a3223d94a7f7ef5fda1d2928e2510b0cbede
|
[
"MIT"
] | 70
|
2021-01-12T17:55:15.000Z
|
2022-03-30T17:37:02.000Z
|
gem_metrics/questeval.py
|
ndaheim/GEM-metrics
|
cfa7a3223d94a7f7ef5fda1d2928e2510b0cbede
|
[
"MIT"
] | 14
|
2021-01-30T20:55:17.000Z
|
2022-03-24T02:31:21.000Z
|
#!/usr/bin/env python3
from .metric import SourceAndReferencedMetric
from questeval.questeval_metric import QuestEval as QuestEvalMetric
from logzero import logger
class QuestEval(SourceAndReferencedMetric):
def __init__(self):
# Default values
self.task = "summarization"
self.language = "en"
self._this_task_is_available = True
self.metric = QuestEvalMetric(
task=self.task,
language=self.language,
)
def support_caching(self):
# We are using corpus-level QuestEval which is aggregated.
return True
def compute(self, cache, predictions, references, sources):
# If task or language is different, we must change QA / QG models for questeval
if predictions.task != self.task or predictions.language.alpha_2 != self.language:
self.task = predictions.task
self.language = predictions.language
# Checking if the task is available
task = predictions.task
self._this_task_is_available = True
if self.task not in self.metric.AVAILABLE_TASKS:
self._this_task_is_available = False
task = "text2text"
logger.warning(
"This task is not available, QuestMetric is using the general text2text models."
)
self.metric = QuestEvalMetric(
task=task,
language=predictions.language.alpha_2,
)
# If the task was not available, then we pass references instead of sources
local_sources, local_references = sources.untokenized, [[None]] * len(sources.untokenized)
if self._this_task_is_available is False:
local_sources, local_references = [None] * len(
references.untokenized
), references.untokenized
# Computing scores through one batched step
scores = self.metric.corpus_questeval(
hypothesis=predictions.untokenized,
sources=local_sources,
list_references=local_references,
)
formatted_scores = {}
for sc, pred_id in zip(scores['ex_level_scores'], predictions.ids):
formatted_score = {"questeval": float(sc)}
formatted_scores[pred_id] = formatted_score
if cache is not None:
cache_key = (self.__class__.__name__, predictions.filename, pred_id)
cache[cache_key] = formatted_score
return formatted_scores
| 37.367647
| 100
| 0.63046
|
4c8a3cdd4e6141dd5438669a3ede65cf45e9fc74
| 5,187
|
py
|
Python
|
modules/tools/map_gen/create_traffic_light_from_event.py
|
BaiduXLab/apollo
|
2764e934b6d0da1342be781447348288ac84c5e9
|
[
"Apache-2.0"
] | 22
|
2018-10-10T14:46:32.000Z
|
2022-02-28T12:43:43.000Z
|
modules/tools/map_gen/create_traffic_light_from_event.py
|
BaiduXLab/apollo
|
2764e934b6d0da1342be781447348288ac84c5e9
|
[
"Apache-2.0"
] | 9
|
2019-12-07T07:26:32.000Z
|
2022-02-10T18:26:18.000Z
|
modules/tools/map_gen/create_traffic_light_from_event.py
|
BaiduXLab/apollo
|
2764e934b6d0da1342be781447348288ac84c5e9
|
[
"Apache-2.0"
] | 12
|
2018-12-24T02:17:19.000Z
|
2021-12-06T01:54:09.000Z
|
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
This program can create a traffic light protobuf from localization message
"""
import rosbag
import std_msgs
import argparse
import shutil
import os
import rospy
import sys
import math
from std_msgs.msg import String
import common.proto_utils as proto_utils
from common.message_manager import PbMessageManager
from modules.map.proto import map_signal_pb2
from modules.map.proto import map_geometry_pb2
g_message_manager = PbMessageManager()
g_args = None
# mkz vehicle configuration
g_front_to_center = 4.0
g_left_to_center = 1.043 + 0.5
g_right_to_center = 1.043 + 0.5
g_lane_width = 3.7
def create_stop_line(center_x, center_y, heading):
"""create a stop line from center point"""
left_x = center_x + g_left_to_center * math.cos(heading + math.pi / 2.0)
left_y = center_y + g_left_to_center * math.sin(heading + math.pi / 2.0)
right_x = center_x + g_right_to_center * math.cos(heading - math.pi / 2.0)
right_y = center_y + g_right_to_center * math.sin(heading - math.pi / 2.0)
stop_line = map_geometry_pb2.Curve()
curve_segment = stop_line.segment.add()
left_point = curve_segment.line_segment.point.add()
left_point.x = left_x
left_point.y = left_y
center_point = curve_segment.line_segment.point.add()
center_point.x = center_x
center_point.y = center_y
right_point = curve_segment.line_segment.point.add()
right_point.x = right_x
right_point.y = right_y
return stop_line
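# Geometry sketch for create_stop_line (illustrative numbers): with the center
# at (0, 0) and heading 0 (pointing along +x), heading + pi/2 places the left
# point at (0, +g_left_to_center) and heading - pi/2 places the right point at
# (0, -g_right_to_center), so the stop line runs perpendicular to the vehicle
# heading and passes through the center point.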
def create_signal_proto(x, y, heading):
# mkz vehicle configuration
center_x = x + g_front_to_center * math.cos(heading)
center_y = y + g_front_to_center * math.sin(heading)
map_signal = map_signal_pb2.Signal()
map_signal.id.id = "%2.5f_%2.5f" % (center_x, center_y)
map_signal.type = map_signal_pb2.Signal.MIX_3_VERTICAL
# left subsignal
left_subsignal = map_signal.subsignal.add()
left_x = center_x + g_left_to_center * math.cos(heading + math.pi / 2.0)
left_y = center_y + g_left_to_center * math.sin(heading + math.pi / 2.0)
left_subsignal.id.id = "%2.5f_%2.5f" % (left_x, left_y)
left_subsignal.type = map_signal_pb2.Subsignal.CIRCLE
left_subsignal.location.x = left_x
left_subsignal.location.y = left_y
left_subsignal.location.z = 5.0
stopline = map_signal.stop_line.add()
stopline.CopyFrom(create_stop_line(center_x, center_y, heading))
if g_args.extend_to_neighbor_lane:
# add stop line on left lane
left_shift_x = center_x + g_lane_width * math.cos(
heading + math.pi / 2.0)
left_shift_y = center_y + g_lane_width * math.sin(
heading + math.pi / 2.0)
stopline = map_signal.stop_line.add()
stopline.CopyFrom(
create_stop_line(left_shift_x, left_shift_y, heading))
# add stop line on right lane
right_shift_x = center_x + g_lane_width * math.cos(
heading - math.pi / 2.0)
right_shift_y = center_y + g_lane_width * math.sin(
heading - math.pi / 2.0)
stopline = map_signal.stop_line.add()
stopline.CopyFrom(
create_stop_line(right_shift_x, right_shift_y, heading))
return map_signal
def parse_drive_event_file(drive_event_filename, signal_filename):
drive_event = g_message_manager.parse_topic_file("/apollo/drive_event",
drive_event_filename)
if not drive_event:
print("Failed to find localization in %s" % drive_event_filename)
return None
pose = drive_event.location
map_signal = create_signal_proto(pose.position.x, pose.position.y,
pose.heading)
proto_utils.write_pb_to_text_file(map_signal, signal_filename)
return map_signal
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=
"A tool to create traffic light protobuf message from localization.")
parser.add_argument(
"drive_event_filename",
action="store",
help="""the drive event file name""")
parser.add_argument(
"signal_filename", action="store", help="""the signal file name""")
parser.add_argument(
"--extend_to_neighbor_lane",
action="store_true",
help="""the signal file name""")
g_args = parser.parse_args()
parse_drive_event_file(g_args.drive_event_filename, g_args.signal_filename)
| 35.527397
| 79
| 0.679391
|
a846673c34f633512e11dedc7da5098eddd42063
| 453
|
py
|
Python
|
api/utils/parse_params.py
|
odbalogun/areavas-bl
|
bde6696e52cc1b1f780b26803f4071edcc6ca428
|
[
"Apache-2.0"
] | null | null | null |
api/utils/parse_params.py
|
odbalogun/areavas-bl
|
bde6696e52cc1b1f780b26803f4071edcc6ca428
|
[
"Apache-2.0"
] | null | null | null |
api/utils/parse_params.py
|
odbalogun/areavas-bl
|
bde6696e52cc1b1f780b26803f4071edcc6ca428
|
[
"Apache-2.0"
] | null | null | null |
from functools import wraps
from flask_restful import reqparse
def parse_params(*arguments):
def parse(func):
@wraps(func)
def resource_verb(*args, **kwargs):
parser = reqparse.RequestParser()
for argument in arguments:
parser.add_argument(argument)
kwargs.update(parser.parse_args())
return func(*args, **kwargs)
return resource_verb
return parse
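# Hypothetical usage sketch (the resource and argument names below are only for
# illustration):
#
#   class Widget(Resource):
#       @parse_params("name", "color")
#       def post(self, name=None, color=None):
#           ...
#
# Each listed argument is registered on a RequestParser, and the parsed values
# are merged into the keyword arguments of the decorated resource method.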
| 23.842105
| 50
| 0.615894
|
133b3af168014ff21112d88a4c5a4ec23dee1927
| 857
|
py
|
Python
|
app/grandchallenge/cases/migrations/0023_auto_20200521_1052.py
|
njmhendrix/grand-challenge.org
|
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
|
[
"Apache-2.0"
] | 1
|
2021-02-09T10:30:44.000Z
|
2021-02-09T10:30:44.000Z
|
app/grandchallenge/cases/migrations/0023_auto_20200521_1052.py
|
njmhendrix/grand-challenge.org
|
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
|
[
"Apache-2.0"
] | null | null | null |
app/grandchallenge/cases/migrations/0023_auto_20200521_1052.py
|
njmhendrix/grand-challenge.org
|
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-05-21 10:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("cases", "0022_auto_20200325_1151"),
]
operations = [
migrations.AddField(
model_name="image",
name="window_center",
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name="image",
name="window_width",
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name="image",
name="name",
field=models.CharField(max_length=4096),
),
migrations.AlterField(
model_name="rawimagefile",
name="filename",
field=models.CharField(max_length=4096),
),
]
| 25.205882
| 52
| 0.556593
|
3306a4d0e138c91909af4110a1e689e593d06a24
| 14,189
|
py
|
Python
|
client/v1/docker_image_.py
|
pcj/containerregistry
|
657fcea7f1206de849058517bd0a0b5bdc92d325
|
[
"Apache-2.0"
] | null | null | null |
client/v1/docker_image_.py
|
pcj/containerregistry
|
657fcea7f1206de849058517bd0a0b5bdc92d325
|
[
"Apache-2.0"
] | 1
|
2018-04-12T13:52:03.000Z
|
2018-04-12T13:52:03.000Z
|
client/v1/docker_image_.py
|
pcj/containerregistry
|
657fcea7f1206de849058517bd0a0b5bdc92d325
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package provides DockerImage for examining docker_build outputs."""
import abc
import cStringIO
import gzip
import httplib
import json
import os
import string
import subprocess
import sys
import tarfile
import tempfile
import threading
from containerregistry.client import docker_creds
from containerregistry.client import docker_name
from containerregistry.client.v1 import docker_creds as v1_creds
from containerregistry.client.v1 import docker_http
import httplib2
class DockerImage(object):
"""Interface for implementations that interact with Docker images."""
__metaclass__ = abc.ABCMeta # For enforcing that methods are overridden.
# pytype: disable=bad-return-type
@abc.abstractmethod
def top(self):
"""The layer id of the topmost layer."""
# pytype: enable=bad-return-type
# pytype: disable=bad-return-type
@abc.abstractmethod
def repositories(self):
"""The json blob of tags, loaded as a dict."""
pass
# pytype: enable=bad-return-type
def parent(self, layer_id):
"""The layer of id of the parent of the provided layer, or None.
Args:
layer_id: the id of the layer whose parentage we're asking
Returns:
The identity of the parent layer, or None if the root.
"""
metadata = json.loads(self.json(layer_id))
if 'parent' not in metadata:
return None
return metadata['parent']
# pytype: disable=bad-return-type
@abc.abstractmethod
def json(self, layer_id):
"""The JSON metadata of the provided layer.
Args:
layer_id: the id of the layer whose metadata we're asking
Returns:
The raw json string of the layer.
"""
pass
# pytype: enable=bad-return-type
# pytype: disable=bad-return-type
@abc.abstractmethod
def layer(self, layer_id):
"""The layer.tar.gz blob of the provided layer id.
Args:
layer_id: the id of the layer for whose layer blob we're asking
Returns:
The raw blob string of the layer.
"""
pass
# pytype: enable=bad-return-type
def uncompressed_layer(self, layer_id):
"""Same as layer() but uncompressed."""
zipped = self.layer(layer_id)
buf = cStringIO.StringIO(zipped)
f = gzip.GzipFile(mode='rb', fileobj=buf)
unzipped = f.read()
return unzipped
# pytype: disable=bad-return-type
@abc.abstractmethod
def ancestry(self, layer_id):
"""The ancestry of the given layer, base layer first.
Args:
layer_id: the id of the layer whose ancestry we're asking
Returns:
The list of ancestor IDs, base first, layer_id last.
"""
pass
# pytype: enable=bad-return-type
# __enter__ and __exit__ allow use as a context manager.
@abc.abstractmethod
def __enter__(self):
pass
@abc.abstractmethod
def __exit__(self, unused_type, unused_value, unused_traceback):
pass
# Gzip injects a timestamp into its output, which makes its output and digest
# non-deterministic. To get reproducible pushes, freeze time.
# This approach is based on the following StackOverflow answer:
# http://stackoverflow.com/
# questions/264224/setting-the-gzip-timestamp-from-python
class _FakeTime(object):
def time(self):
return 1225856967.109
gzip.time = _FakeTime()
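# With the patch above, gzip.GzipFile reads its module-level time reference when
# writing the 4-byte MTIME field of the gzip header, so repeated calls to
# layer() below should produce byte-identical (and therefore digest-stable)
# output for the same uncompressed content.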
class FromShardedTarball(DockerImage):
"""This decodes the sharded image tarballs from docker_build."""
def __init__(self,
layer_to_tarball,
top,
name = None,
compresslevel = 9):
self._layer_to_tarball = layer_to_tarball
self._top = top
self._compresslevel = compresslevel
self._memoize = {}
self._lock = threading.Lock()
self._name = name
def _content(self, layer_id, name,
memoize = True):
"""Fetches a particular path's contents from the tarball."""
# Check our cache
if memoize:
with self._lock:
if name in self._memoize:
return self._memoize[name]
# tarfile is inherently single-threaded:
# https://mail.python.org/pipermail/python-bugs-list/2015-March/265999.html
# so instead of locking, just open the tarfile for each file
# we want to read.
with tarfile.open(name=self._layer_to_tarball(layer_id), mode='r') as tar:
try:
content = tar.extractfile(name).read() # pytype: disable=attribute-error
except KeyError:
content = tar.extractfile('./' + name).read() # pytype: disable=attribute-error
# Populate our cache.
if memoize:
with self._lock:
self._memoize[name] = content
return content
def top(self):
"""Override."""
return self._top
def repositories(self):
"""Override."""
return json.loads(self._content(self.top(), 'repositories'))
def json(self, layer_id):
"""Override."""
return self._content(layer_id, layer_id + '/json')
# Large, do not memoize.
def uncompressed_layer(self, layer_id):
"""Override."""
return self._content(layer_id, layer_id + '/layer.tar', memoize=False)
# Large, do not memoize.
def layer(self, layer_id):
"""Override."""
unzipped = self.uncompressed_layer(layer_id)
buf = cStringIO.StringIO()
f = gzip.GzipFile(mode='wb', compresslevel=self._compresslevel, fileobj=buf)
try:
f.write(unzipped)
finally:
f.close()
zipped = buf.getvalue()
return zipped
def ancestry(self, layer_id):
"""Override."""
p = self.parent(layer_id)
if not p:
return [layer_id]
return [layer_id] + self.ancestry(p)
# __enter__ and __exit__ allow use as a context manager.
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
pass
def _get_top(tarball, name = None):
"""Get the topmost layer in the image tarball."""
with tarfile.open(name=tarball, mode='r') as tar:
try:
repositories = json.loads(tar.extractfile('repositories').read()) # pytype: disable=attribute-error
except KeyError:
repositories = json.loads(tar.extractfile('./repositories').read()) # pytype: disable=attribute-error
if name:
key = str(name.as_repository())
return repositories[key][name.tag]
if len(repositories) != 1:
raise ValueError('Tarball must contain a single repository, '
'or a name must be specified to FromTarball.')
for (unused_repo, tags) in repositories.iteritems():
if len(tags) != 1:
raise ValueError('Tarball must contain a single tag, '
'or a name must be specified to FromTarball.')
for (unused_tag, layer_id) in tags.iteritems():
return layer_id
raise Exception('Unreachable code in _get_top()')
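# For reference, the 'repositories' entry parsed above is expected to look
# roughly like {"<repository>": {"<tag>": "<layer id>"}}; _get_top either looks
# up the requested tag or, when no name is given, requires exactly one
# repository with exactly one tag so the choice of top layer is unambiguous.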
class FromTarball(FromShardedTarball):
"""This decodes the image tarball output of docker_build for upload."""
def __init__(self,
tarball,
name = None,
compresslevel = 9):
super(FromTarball, self).__init__(
lambda unused_id: tarball,
_get_top(tarball, name),
name=name,
compresslevel=compresslevel)
class FromRegistry(DockerImage):
"""This accesses a docker image hosted on a registry (non-local)."""
def __init__(
self,
name,
basic_creds,
transport):
self._name = name
self._creds = basic_creds
self._transport = transport
# Set up in __enter__
self._tags = {}
self._response = {}
def top(self):
"""Override."""
assert isinstance(self._name, docker_name.Tag)
return self._tags[self._name.tag]
def repositories(self):
"""Override."""
return {self._name.repository: self._tags}
def tags(self):
"""Lists the tags present in the remote repository."""
return self.raw_tags().keys()
def raw_tags(self):
"""Dictionary of tag to image id."""
return self._tags
def _content(self, suffix):
if suffix not in self._response:
_, self._response[suffix] = docker_http.Request(
self._transport, '{scheme}://{endpoint}/v1/images/{suffix}'.format(
scheme=docker_http.Scheme(self._endpoint),
endpoint=self._endpoint,
suffix=suffix), self._creds, [httplib.OK])
return self._response[suffix]
def json(self, layer_id):
"""Override."""
# GET server1/v1/images/IMAGEID/json
return self._content(layer_id + '/json')
# Large, do not memoize.
def layer(self, layer_id):
"""Override."""
# GET server1/v1/images/IMAGEID/layer
return self._content(layer_id + '/layer')
def ancestry(self, layer_id):
"""Override."""
# GET server1/v1/images/IMAGEID/ancestry
return json.loads(self._content(layer_id + '/ancestry'))
# __enter__ and __exit__ allow use as a context manager.
def __enter__(self):
# This initiates the pull by issuing:
# GET H:P/v1/repositories/R/images
resp, unused_content = docker_http.Request(
self._transport,
'{scheme}://{registry}/v1/repositories/{repository_name}/images'.format(
scheme=docker_http.Scheme(self._name.registry),
registry=self._name.registry,
repository_name=self._name.repository), self._creds, [httplib.OK])
# The response should have an X-Docker-Token header, which
# we should extract and annotate subsequent requests with:
# Authorization: Token {extracted value}
self._creds = v1_creds.Token(resp['x-docker-token'])
self._endpoint = resp['x-docker-endpoints']
# TODO(user): Consider also supporting cookies, which are
# used by Quay.io for authenticated sessions.
# Next, fetch the set of tags in this repository.
# GET server1/v1/repositories/R/tags
resp, content = docker_http.Request(
self._transport,
'{scheme}://{endpoint}/v1/repositories/{repository_name}/tags'.format(
scheme=docker_http.Scheme(self._endpoint),
endpoint=self._endpoint,
repository_name=self._name.repository), self._creds, [httplib.OK])
self._tags = json.loads(content)
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
pass
class Random(DockerImage):
"""This generates an image with Random properties.
We ensure basic consistency of the generated docker
image.
"""
# TODO(b/36589467): Add function arg for creating blob.
def __init__(self,
sample,
num_layers = 5,
layer_byte_size = 64,
blobs = None):
# Generate the image.
self._ancestry = []
self._layers = {}
num_layers = len(blobs) if blobs else num_layers
for i in xrange(num_layers):
# Avoid repetitions.
while True:
# Typecheck disabled due to b/38395615
layer_id = self._next_id(sample) # pytype: disable=wrong-arg-types
if layer_id not in self._ancestry:
self._ancestry += [layer_id]
blob = blobs[i] if blobs else None
self._layers[layer_id] = self._next_layer(
sample, layer_byte_size, blob) # pytype: disable=wrong-arg-types
break
def top(self):
"""Override."""
return self._ancestry[0]
def repositories(self):
"""Override."""
return {
'random/image': {
# TODO(user): Remove this suppression.
'latest': self.top(), # type: ignore
}
}
def json(self, layer_id):
"""Override."""
metadata = {'id': layer_id}
ancestry = self.ancestry(layer_id)
if len(ancestry) != 1:
metadata['parent'] = ancestry[1]
return json.dumps(metadata, sort_keys=True)
def layer(self, layer_id):
"""Override."""
return self._layers[layer_id]
def ancestry(self, layer_id):
"""Override."""
assert layer_id in self._ancestry
index = self._ancestry.index(layer_id)
return self._ancestry[index:]
def _next_id(self, sample):
return sample('0123456789abcdef', 64)
# pylint: disable=missing-docstring
def _next_layer(self, sample,
layer_byte_size, blob):
buf = cStringIO.StringIO()
# TODO(user): Consider doing something more creative...
with tarfile.open(fileobj=buf, mode='w:gz') as tar:
if blob:
info = tarfile.TarInfo(
name='./' +
self._next_id(sample)) # pytype: disable=wrong-arg-types
info.size = len(blob)
tar.addfile(info, fileobj=cStringIO.StringIO(blob))
# Linux optimization, use dd for data file creation.
elif sys.platform.startswith('linux') and layer_byte_size >= 1024 * 1024:
mb = layer_byte_size / (1024 * 1024)
tempdir = tempfile.mkdtemp()
data_filename = os.path.join(tempdir, 'a.bin')
if os.path.exists(data_filename):
os.remove(data_filename)
process = subprocess.Popen([
'dd', 'if=/dev/urandom',
'of=%s' % data_filename, 'bs=1M',
'count=%d' % mb
])
process.wait()
with open(data_filename, 'rb') as fd:
info = tar.gettarinfo(name=data_filename)
tar.addfile(info, fileobj=fd)
os.remove(data_filename)
os.rmdir(tempdir)
else:
data = sample(string.printable, layer_byte_size)
info = tarfile.TarInfo(
name='./' +
self._next_id(sample)) # pytype: disable=wrong-arg-types
info.size = len(data)
tar.addfile(info, fileobj=cStringIO.StringIO(data))
return buf.getvalue()
# __enter__ and __exit__ allow use as a context manager.
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
pass
| 29.622129
| 108
| 0.657904
|
61b549582c309d90ad35bc0c90411156ddd38f64
| 5,240
|
py
|
Python
|
brandenburg/scraper_brandenburg.py
|
okfde/bundesrat-scraper
|
a5e5b8e4172fb85430807f2a78fe78e69a826dab
|
[
"MIT"
] | 10
|
2018-12-08T12:16:52.000Z
|
2021-05-23T08:14:08.000Z
|
brandenburg/scraper_brandenburg.py
|
okfde/bundesrat-scraper
|
a5e5b8e4172fb85430807f2a78fe78e69a826dab
|
[
"MIT"
] | 17
|
2018-12-07T09:48:31.000Z
|
2020-09-03T14:37:54.000Z
|
brandenburg/scraper_brandenburg.py
|
okfde/bundesrat-scraper
|
a5e5b8e4172fb85430807f2a78fe78e69a826dab
|
[
"MIT"
] | 1
|
2019-01-21T15:20:00.000Z
|
2019-01-21T15:20:00.000Z
|
import re
import pdb
import requests
from lxml import html as etree
import pdfcutter
# Import relative Parent Directory for Helper Classes
import os, sys
sys.path.insert(0, os.path.abspath('..')) #Used when call is ` python3 file.py`
sys.path.insert(0, os.path.abspath('.')) #Used when call is ` python3 $COUNTY/file.py`
import helper
import selectionVisualizer as dVis
import PDFTextExtractor
import MainBoilerPlate
INDEX_URL = 'https://landesvertretung-brandenburg.de/bundesrat/abstimmungsverhalten-im-bundesrat/'
NUM_RE = re.compile(r'(\d+)\. Sitzung des Bundesrates')
BR_TEXT_RE = re.compile(r'^Ergebnis BR:')
class MainExtractorMethod(MainBoilerPlate.MainExtractorMethod):
#Out: Dict of {sessionNumberOfBR: PDFWebLink} entries
def _get_pdf_urls(self):
response = requests.get(INDEX_URL)
root = etree.fromstring(response.content)
#Have three completely different xpaths for year-tables
#Therefore, filter (almost) all links (a)
allLinks = root.xpath('//ul/li/a')
for name in allLinks:
text = name.text_content()
maybeNum = NUM_RE.search(text) #Links to a Bundesrat-PDF?
if maybeNum: #Also have e.g. "Mitglieder Brandenburgs im Bundesrat" as link -> Filter them out
num = int(maybeNum.group(1))
link = name.attrib['href']
                link = link.replace(" ", "%20") #Percent-encode spaces so the URL stays valid
yield int(num), link
#Senats/BR Texts and TOPS in BW all have same formatting
class SenatsAndBRTextExtractor(PDFTextExtractor.AbstractSenatsAndBRTextExtractor):
def _extractSenatBRTexts(self, selectionCurrentTOP, selectionNextTOP):
page_heading = 73 #Bottom of heading on each page
page_footer = 1160 #Upper of footer on each page
#Get indented Text, Senats/BR text is everything below it, need to check below this because otherwise I also filter Name of TOP
TOPRightIndented = self.cutter.all().below(selectionCurrentTOP).filter(
left__gte = selectionCurrentTOP.left + 100,
top__lt = page_footer# Match otherwise page number for e.g. 984 26
)
if selectionNextTOP:
TOPRightIndented = TOPRightIndented.above(selectionNextTOP)
last_indented_with_text = None
#empty, but present lines below senat text can mess up parsing, so only watch for last non-empty
for line in TOPRightIndented:
if line.clean_text().strip(): #empty strings are falsy
last_indented_with_text = line
#dVis.showCutter(last_indented_with_text)
senatsBR_text = self.cutter.all().below(last_indented_with_text)
#dVis.showCutter(senatsBR_text)
if selectionNextTOP:
senatsBR_text = senatsBR_text.above(selectionNextTOP)
br_text_title = senatsBR_text.filter(auto_regex='^Ergebnis Bundesrat:')
if br_text_title: #Cut BR away, but above() always empty if no BR title exists
senats_text = senatsBR_text.above(br_text_title).clean_text()
else:
senats_text = senatsBR_text.clean_text()
#For some reason the BR Text is always empty when I do:
#BR_text = senatsBR_text.below(BR_text_title).clean_text()
br_text = senatsBR_text.filter(
doc_top__gte=br_text_title.doc_top +1 ,
top__lt = page_footer# Match otherwise page number for e.g. 984 26
).clean_text()
return senats_text, br_text
#Senats/BR Texts and TOPS in BW all have same formatting
class TextExtractorHolder(PDFTextExtractor.TextExtractorHolder):
def _getRightTOPPositionFinder(self, top):
TOPRight=200
if self.sessionNumber >= 986:
formatTOPsWithSubpart="{number}{subpart}" #e.g. BB 992 23. a) is "23a"
elif self.sessionNumber == 985:
formatTOPsWithSubpart="{number} {subpart}" #e.g. BB 985 9. a) is "9 a"
elif self.sessionNumber == 980 and top in ["2. a)", "2. b)", "25. a)", "25. b)"]: #980 80a is like the next case below again
formatTOPsWithSubpart="{number} {subpart}" #e.g. BB 980 "2. a)" is "2 a"
elif 974 <= self.sessionNumber <= 984:
formatTOPsWithSubpart="{number}{subpart}" #e.g. BB 984 45. a) is "45a"
if self.sessionNumber == 984:
TOPRight = 145 # Else match 26. with "26. März..." of TOP 15
elif 970 <= self.sessionNumber <= 973:
formatTOPsWithSubpart="{number}{subpart}." #e.g. BB 973 25. a) is "25a."
elif 968 <= self.sessionNumber <= 969:
formatTOPsWithSubpart="{number} {subpart}" #e.g. BB 969 21. a) is "21 a"
elif self.sessionNumber <= 967:
formatTOPsWithSubpart="{number}{subpart}." #e.g. BB 967 3. a) is "3a."
return PDFTextExtractor.CustomTOPFormatPositionFinder(self.cutter, formatSubpartTOP=formatTOPsWithSubpart, TOPRight=TOPRight) #945 13. in date would cause problems without TOPRight
# Decide if I need custom rules for special session/TOP cases because PDF format isn't consistent
#In BW all Text Rules are consistent
def _getRightSenatBRTextExtractor(self, top, cutter):
return SenatsAndBRTextExtractor(cutter)
| 46.785714
| 188
| 0.676145
|
b26d7d3e5940da4687869e14aaaf914363be9fd0
| 323
|
py
|
Python
|
src/python/WMCore/WMBS/Oracle/Jobs/GetCountByState.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 21
|
2015-11-19T16:18:45.000Z
|
2021-12-02T18:20:39.000Z
|
src/python/WMCore/WMBS/Oracle/Jobs/GetCountByState.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 5,671
|
2015-01-06T14:38:52.000Z
|
2022-03-31T22:11:14.000Z
|
src/python/WMCore/WMBS/Oracle/Jobs/GetCountByState.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 67
|
2015-01-21T15:55:38.000Z
|
2022-02-03T19:53:13.000Z
|
#!/usr/bin/env python
"""
_GetCountByState_
Oracle implementation of Jobs.GetCountByState
"""
from __future__ import division
from WMCore.WMBS.MySQL.Jobs.GetCountByState import GetCountByState as MySQLGetCountByState
class GetCountByState(MySQLGetCountByState):
"""
Identical to MySQL version.
"""
pass
| 19
| 90
| 0.770898
|
b21d015eaec6dea6d2f9659c6522e827032831d6
| 2,443
|
py
|
Python
|
pyrandall/executors/requests_http.py
|
kpn/pyrandall
|
ece91d3cae901c1913451c3c8996a7ce9f6f58af
|
[
"Apache-2.0"
] | 2
|
2019-09-26T13:45:35.000Z
|
2019-09-27T08:03:43.000Z
|
pyrandall/executors/requests_http.py
|
kpn/pyrandall
|
ece91d3cae901c1913451c3c8996a7ce9f6f58af
|
[
"Apache-2.0"
] | 7
|
2019-09-25T09:00:36.000Z
|
2020-06-29T06:58:11.000Z
|
pyrandall/executors/requests_http.py
|
kpn/pyrandall
|
ece91d3cae901c1913451c3c8996a7ce9f6f58af
|
[
"Apache-2.0"
] | 2
|
2019-09-25T09:41:38.000Z
|
2020-02-17T11:49:37.000Z
|
import requests
from pyrandall.types import Assertion
from .common import Executor
class RequestHttp(Executor):
def __init__(self, spec, *args, **kwargs):
super().__init__()
self.execution_mode = spec.execution_mode
self.spec = spec
def execute(self, reporter):
spec = self.spec
if len(spec.assertions) == 0:
# TODO: Reporter should say "zero assertions found / specified"
return False
# TODO: assert / tests the request happened without exceptions
# act on __exit__ codes
# with Assertion("response", spec.assertions, "http response", reporter) as a:
if spec.body:
response = requests.request(
spec.method, spec.url, headers=spec.headers, data=spec.body
)
else:
response = requests.request(spec.method, spec.url, headers=spec.headers)
assertions = []
with Assertion(
"status_code", spec.assertions, "http response status_code", reporter
) as a:
assertions.append(a)
a.actual_value = response.status_code
with Assertion("body", spec.assertions, "http response body", reporter) as a:
# a.result = event.json_deep_equals(a.expected, response.content)
assertions.append(a)
a.actual_value = response.content
        # TODO: deprecate this, not functionally needed anymore
return all([a.passed() for a in assertions])
# TODO: move this to reporter
def create_jsondiff(self, expected, actual):
print("Output data different")
print(f"Expected: {expected}")
print(f"Actual: {actual}")
def represent(self):
return f"RequestHttp {self.spec.execution_mode.represent()} {self.spec.method} to {self.spec.url}"
class RequestHttpEvents(Executor):
def __init__(self, spec, *args, **kwargs):
super().__init__()
self.execution_mode = spec.execution_mode
self.spec = spec
self.nr_of_requests = len(spec.requests)
def execute(self, reporter):
if self.nr_of_requests == 0:
# TODO: Reporter should say "zero events found / specified"
return False
return all([RequestHttp(r).execute(reporter) for r in self.spec.requests])
def represent(self):
return f"RequestHttpEvents {self.spec.execution_mode.represent()} {self.nr_of_requests} events"
| 34.9
| 106
| 0.633647
|
c475621ccd6cc59fd49da395f0d9cf338c8aea09
| 580
|
py
|
Python
|
molecool/measure.py
|
y-yao/molssi_tutorial
|
d3ef2f4710e0c2ba8c1cad45342e335153d8dfef
|
[
"BSD-3-Clause"
] | null | null | null |
molecool/measure.py
|
y-yao/molssi_tutorial
|
d3ef2f4710e0c2ba8c1cad45342e335153d8dfef
|
[
"BSD-3-Clause"
] | null | null | null |
molecool/measure.py
|
y-yao/molssi_tutorial
|
d3ef2f4710e0c2ba8c1cad45342e335153d8dfef
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
def calculate_distance(rA, rB):
# This function calculates the distance between two points given as numpy arrays.
    d = rA - rB
    dist = np.linalg.norm(d)
return dist
def calculate_angle(rA, rB, rC, degrees=False):
# Calculate the angle between three points. Answer is given in radians by default, but can be given in degrees
# by setting degrees=True
AB = rB - rA
BC = rB - rC
    theta = np.arccos(np.dot(AB, BC) / (np.linalg.norm(AB) * np.linalg.norm(BC)))
if degrees:
return np.degrees(theta)
else:
return theta
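# Quick sanity checks (illustrative values): calculate_distance on numpy arrays
# (0, 0, 0) and (3, 4, 0) returns 5.0, and calculate_angle with rA=(1, 0, 0),
# rB=(0, 0, 0), rC=(0, 1, 0) returns pi/2 (90.0 with degrees=True), since the
# angle is measured at the middle point rB.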
| 29
| 114
| 0.67069
|
e42e472ed237c4b50a663755c0780d69c0ec20f7
| 2,405
|
py
|
Python
|
setup.py
|
bartoszj/Mallet
|
0645b08c7eaea4b2f2769a0ca0d84fa8f0332357
|
[
"MIT"
] | 16
|
2015-09-07T00:34:49.000Z
|
2021-11-12T05:54:01.000Z
|
setup.py
|
bartoszj/Mallet
|
0645b08c7eaea4b2f2769a0ca0d84fa8f0332357
|
[
"MIT"
] | null | null | null |
setup.py
|
bartoszj/Mallet
|
0645b08c7eaea4b2f2769a0ca0d84fa8f0332357
|
[
"MIT"
] | 2
|
2017-05-21T16:39:00.000Z
|
2017-06-11T13:09:07.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Instruction:
# - Dev
# - python setup.py develop --user
# - (test)
# - python setup.py develop --user -u
#
# - PyPI
# - python setup.py register -r https://pypi.python.org/pypi
#
# - python setup.py sdist bdist_wheel
# - pip install --user dist/mallet_lldb*
# - (test)
# - pip uninstall mallet-lldb
#
# - twine upload dist/*
# - pip install --user mallet-lldb
# - (test)
# - pip uninstall mallet-lldb
#
# - Test PyPI
# - python setup.py register -r https://testpypi.python.org/pypi
#
# - python setup.py sdist bdist_wheel
# - pip install --user dist/mallet_lldb*
# - (test)
# - pip uninstall mallet-lldb
#
# - twine upload -r pypitest dist/*
# - pip install -i https://testpypi.python.org/pypi --user mallet-lldb
# - (test)
# - pip uninstall mallet-lldb
setup(name="mallet-lldb",
version="1.0a2",
description="LLDB additions for iOS project.",
url="https://github.com/bartoszj/Mallet",
author="Bartosz Janda",
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Environment :: MacOS X",
"Environment :: Plugins",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS",
"Programming Language :: Python :: 2.7",
"Topic :: Software Development :: Debuggers"
],
keywords="LLDB debugger development iOS summary",
packages=find_packages(),
install_requires=["PyYAML"],
extras_require={
"dev": ["tabulate"]
},
package_data={
"mallet": ["config.yml"],
"mallet.AFNetworking": ["config.yml", "lldbinit"],
"mallet.CFNetwork": ["config.yml", "lldbinit", "class_dumps/*.json"],
"mallet.common": ["config.yml", "lldbinit"],
"mallet.CoreGraphics": ["config.yml", "lldbinit"],
"mallet.debug_commands": ["config.yml", "lldbinit"],
"mallet.Foundation": ["config.yml", "lldbinit", "class_dumps/*.json"],
"mallet.QuartzCore": ["config.yml", "lldbinit", "class_dumps/*.json"],
"mallet.StoreKit": ["config.yml", "lldbinit", "class_dumps/*.json"],
"mallet.UIKit": ["config.yml", "lldbinit", "class_dumps/*.json"],
})
| 32.945205
| 80
| 0.590021
|
67921bc90ba3c34e512bd3e4167322daea4dc689
| 7,396
|
py
|
Python
|
lab9/text_recognizer/models/resnet_transformer.py
|
AleksandrLiadov/fsdl-text-recognizer-2021-labs
|
9495e1457fc82ab83ff7e4141939d603565eb89b
|
[
"MIT"
] | 402
|
2021-01-18T12:14:08.000Z
|
2022-03-28T03:41:05.000Z
|
lab9/text_recognizer/models/resnet_transformer.py
|
AleksandrLiadov/fsdl-text-recognizer-2021-labs
|
9495e1457fc82ab83ff7e4141939d603565eb89b
|
[
"MIT"
] | 27
|
2021-01-21T01:54:30.000Z
|
2022-03-29T21:39:41.000Z
|
lab9/text_recognizer/models/resnet_transformer.py
|
AleksandrLiadov/fsdl-text-recognizer-2021-labs
|
9495e1457fc82ab83ff7e4141939d603565eb89b
|
[
"MIT"
] | 271
|
2021-01-21T18:07:24.000Z
|
2022-03-30T12:49:53.000Z
|
import argparse
from typing import Any, Dict
import math
import torch
import torch.nn as nn
import torchvision
from .transformer_util import PositionalEncodingImage, PositionalEncoding, generate_square_subsequent_mask
TF_DIM = 256
TF_FC_DIM = 1024
TF_DROPOUT = 0.4
TF_LAYERS = 4
TF_NHEAD = 4
RESNET_DIM = 512 # hard-coded
class ResnetTransformer(nn.Module):
"""Process the line through a Resnet and process the resulting sequence with a Transformer decoder"""
def __init__(
self,
data_config: Dict[str, Any],
args: argparse.Namespace = None,
) -> None:
super().__init__()
self.data_config = data_config
self.input_dims = data_config["input_dims"]
self.num_classes = len(data_config["mapping"])
inverse_mapping = {val: ind for ind, val in enumerate(data_config["mapping"])}
self.start_token = inverse_mapping["<S>"]
self.end_token = inverse_mapping["<E>"]
self.padding_token = inverse_mapping["<P>"]
self.max_output_length = data_config["output_dims"][0]
self.args = vars(args) if args is not None else {}
self.dim = self.args.get("tf_dim", TF_DIM)
tf_fc_dim = self.args.get("tf_fc_dim", TF_FC_DIM)
tf_nhead = self.args.get("tf_nhead", TF_NHEAD)
tf_dropout = self.args.get("tf_dropout", TF_DROPOUT)
tf_layers = self.args.get("tf_layers", TF_LAYERS)
# ## Encoder part - should output vector sequence of length self.dim per sample
resnet = torchvision.models.resnet18(pretrained=False)
self.resnet = torch.nn.Sequential(*(list(resnet.children())[:-2])) # Exclude AvgPool and Linear layers
# Resnet will output (B, RESNET_DIM, _H, _W) logits where _H = input_H // 32, _W = input_W // 32
# self.encoder_projection = nn.Conv2d(RESNET_DIM, self.dim, kernel_size=(2, 1), stride=(2, 1), padding=0)
self.encoder_projection = nn.Conv2d(RESNET_DIM, self.dim, kernel_size=1)
# encoder_projection will output (B, dim, _H, _W) logits
self.enc_pos_encoder = PositionalEncodingImage(
d_model=self.dim, max_h=self.input_dims[1], max_w=self.input_dims[2]
) # Max (Ho, Wo)
# ## Decoder part
self.embedding = nn.Embedding(self.num_classes, self.dim)
self.fc = nn.Linear(self.dim, self.num_classes)
self.dec_pos_encoder = PositionalEncoding(d_model=self.dim, max_len=self.max_output_length)
self.y_mask = generate_square_subsequent_mask(self.max_output_length)
self.transformer_decoder = nn.TransformerDecoder(
nn.TransformerDecoderLayer(d_model=self.dim, nhead=tf_nhead, dim_feedforward=tf_fc_dim, dropout=tf_dropout),
num_layers=tf_layers,
)
self.init_weights() # This is empirically important
def init_weights(self):
initrange = 0.1
self.embedding.weight.data.uniform_(-initrange, initrange)
self.fc.bias.data.zero_()
self.fc.weight.data.uniform_(-initrange, initrange)
nn.init.kaiming_normal_(self.encoder_projection.weight.data, a=0, mode="fan_out", nonlinearity="relu")
if self.encoder_projection.bias is not None:
_fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out( # pylint: disable=protected-access
self.encoder_projection.weight.data
)
bound = 1 / math.sqrt(fan_out)
nn.init.normal_(self.encoder_projection.bias, -bound, bound)
def encode(self, x: torch.Tensor) -> torch.Tensor:
"""
Parameters
----------
x
(B, H, W) image
Returns
-------
torch.Tensor
(Sx, B, E) logits
"""
_B, C, _H, _W = x.shape
if C == 1:
x = x.repeat(1, 3, 1, 1)
x = self.resnet(x) # (B, RESNET_DIM, _H // 32, _W // 32), (B, 512, 18, 20) in the case of IAMParagraphs
x = self.encoder_projection(x) # (B, E, _H // 32, _W // 32), (B, 256, 18, 20) in the case of IAMParagraphs
# x = x * math.sqrt(self.dim) # (B, E, _H // 32, _W // 32) # This prevented any learning
x = self.enc_pos_encoder(x) # (B, E, Ho, Wo); Ho = _H // 32, Wo = _W // 32
x = torch.flatten(x, start_dim=2) # (B, E, Ho * Wo)
x = x.permute(2, 0, 1) # (Sx, B, E); Sx = Ho * Wo
return x
def decode(self, x, y):
"""
Parameters
----------
x
(B, H, W) image
y
(B, Sy) with elements in [0, C-1] where C is num_classes
Returns
-------
torch.Tensor
(Sy, B, C) logits
"""
y_padding_mask = y == self.padding_token
y = y.permute(1, 0) # (Sy, B)
y = self.embedding(y) * math.sqrt(self.dim) # (Sy, B, E)
y = self.dec_pos_encoder(y) # (Sy, B, E)
Sy = y.shape[0]
y_mask = self.y_mask[:Sy, :Sy].type_as(x)
output = self.transformer_decoder(
tgt=y, memory=x, tgt_mask=y_mask, tgt_key_padding_mask=y_padding_mask
) # (Sy, B, E)
output = self.fc(output) # (Sy, B, C)
return output
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""
Parameters
----------
x
(B, H, W) image
y
(B, Sy) with elements in [0, C-1] where C is num_classes
Returns
-------
torch.Tensor
(B, C, Sy) logits
"""
x = self.encode(x) # (Sx, B, E)
output = self.decode(x, y) # (Sy, B, C)
return output.permute(1, 2, 0) # (B, C, Sy)
def predict(self, x: torch.Tensor) -> torch.Tensor:
"""
Parameters
----------
x
(B, H, W) image
Returns
-------
torch.Tensor
(B, Sy) with elements in [0, C-1] where C is num_classes
"""
B = x.shape[0]
S = self.max_output_length
x = self.encode(x) # (Sx, B, E)
output_tokens = (torch.ones((B, S)) * self.padding_token).type_as(x).long() # (B, S)
output_tokens[:, 0] = self.start_token # Set start token
for Sy in range(1, S):
y = output_tokens[:, :Sy] # (B, Sy)
output = self.decode(x, y) # (Sy, B, C)
output = torch.argmax(output, dim=-1) # (Sy, B)
output_tokens[:, Sy : Sy + 1] = output[-1:] # Set the last output token
# Early stopping of prediction loop to speed up prediction
if ((output_tokens[:, Sy] == self.end_token) | (output_tokens[:, Sy] == self.padding_token)).all():
break
# Set all tokens after end token to be padding
for Sy in range(1, S):
ind = (output_tokens[:, Sy - 1] == self.end_token) | (output_tokens[:, Sy - 1] == self.padding_token)
output_tokens[ind, Sy] = self.padding_token
return output_tokens # (B, Sy)
@staticmethod
def add_to_argparse(parser):
parser.add_argument("--tf_dim", type=int, default=TF_DIM)
parser.add_argument("--tf_fc_dim", type=int, default=TF_DIM)
parser.add_argument("--tf_dropout", type=float, default=TF_DROPOUT)
parser.add_argument("--tf_layers", type=int, default=TF_LAYERS)
parser.add_argument("--tf_nhead", type=int, default=TF_NHEAD)
return parser
| 37.543147
| 120
| 0.579638
|
66b414b2099ac3d6d0dcd5d7e996b8ac0026651f
| 742
|
py
|
Python
|
setup.py
|
rgcmaack/cinema_lib
|
9c38fe0c2252fc675c78d965873342c21bee401f
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
rgcmaack/cinema_lib
|
9c38fe0c2252fc675c78d965873342c21bee401f
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
rgcmaack/cinema_lib
|
9c38fe0c2252fc675c78d965873342c21bee401f
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
try:
from setuptools import setup, find_packages
except Exception as e:
print("cinema_lib requires Python 3.6. Exiting.")
sys.exit()
import unittest
from cinema_lib import version
def readme():
with open('README.md') as f:
return f.read()
def tests():
loader = unittest.TestLoader()
return loader.discover('cinema_lib.test')
setup(name='cinema_lib',
version=version(),
description="Library for Cinema databases",
long_description=readme(),
test_suite='setup.tests',
zip_safe=False,
entry_points={
'console_scripts': ['cinema = cinema_lib.cl:main']
},
packages=find_packages(exclude=["test"]),
python_requires="~=3.6"
)
| 23.1875
| 60
| 0.648248
|
98d3ce3ae7ff6ead94a23a94d52ae436b52ee968
| 2,058
|
py
|
Python
|
agents/model-based-vc.py
|
asimonw/ai-playground
|
2418868fa473582258409981056369be5370c907
|
[
"MIT"
] | null | null | null |
agents/model-based-vc.py
|
asimonw/ai-playground
|
2418868fa473582258409981056369be5370c907
|
[
"MIT"
] | null | null | null |
agents/model-based-vc.py
|
asimonw/ai-playground
|
2418868fa473582258409981056369be5370c907
|
[
"MIT"
] | null | null | null |
# model-based reflex agent
import random
class Agent:
def __init__(self, world):
self.state = world
# rules mapping states to actions
self.rules = {
'A': {
'clean': 'right',
'dirty': 'suck'
},
'B': {
'clean': 'left',
'dirty': 'suck'
}
}
self.action = None
def __str__(self):
return 'Action: {}\nWorld: {}\n'.format(self.action, self.state)
# in principle, we take both room and floor as inputs, so that we don't
# only rely on our internal model of the world since last perception
def agent_function(self, percept):
"""Takes percept with properties room and floor and returns action"""
room = percept['room']
floor = percept['floor']
self.action = self.rules[room][floor]
def act(self):
"""Update state based on model of the world"""
if self.action:
if self.action == 'suck':
pos = self.state['position']
self.state[pos] = 'clean'
elif self.action == 'left':
self.state['position'] = 'A'
else:
self.state['position'] = 'B'
# this is not really sensing, just relying on internal model
# and previous actions
def sense(self):
"""Returns percept, i.e. room and floor"""
room = self.state['position']
floor = self.state[room]
return { 'room': room, 'floor': floor }
# iterate for predefined number of steps
# TODO: iterate until world is clean, then stop
def clean(vc, initial_percept, iterations):
def step(vc, percept, count):
vc.agent_function(percept)
vc.act()
print(vc)
percept = vc.sense()
count -= 1
if count > 0:
step(vc, percept, count)
step(vc, initial_percept, iterations)
if __name__ == '__main__':
states = ['clean', 'dirty']
locations = ['A', 'B']
# initial state of the world
world = {
'A': random.choice(states),
'B': random.choice(states),
'position': random.choice(locations)
}
vacuum_cleaner = Agent(world)
initial_percept = {
'room': world['position'],
'floor': world[world['position']]
}
print('# Initial state')
print(world)
print ('# And clean...')
clean(vacuum_cleaner, initial_percept, 4)
| 23.123596
| 72
| 0.650632
|
bc1517a6c10f6b6cef12fd31b2272bf330cfc2ae
| 3,516
|
py
|
Python
|
tests/test_tek_tojson.py
|
keiji/probeCOCOATek
|
accc64c66eb30907fda14cb678f17a49ce3e8675
|
[
"MIT"
] | null | null | null |
tests/test_tek_tojson.py
|
keiji/probeCOCOATek
|
accc64c66eb30907fda14cb678f17a49ce3e8675
|
[
"MIT"
] | null | null | null |
tests/test_tek_tojson.py
|
keiji/probeCOCOATek
|
accc64c66eb30907fda14cb678f17a49ce3e8675
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pytest
import requests
import requests_mock
import json
import sys
import os
from datetime import datetime
from probeCOCOATek.probeCOCOATek import probeCOCOATek, AugumentError, ParamError, DataError
class TestTEKToJSON(object):
def setup_method(self, method):
pass
def teardown_method(self, method):
pass
@pytest.mark.parametrize("nocache", [True, False])
def test_tekjson_normal(self, nocache, normal_distribution_url, normal_distribution_json, zip_data):
pCT = probeCOCOATek(normal_distribution_url)
if not nocache:
for k,v in zip_data.items():
with open(os.path.join(pCT.cache_dir, os.path.basename(k)), 'wb') as f:
f.write(bytes.fromhex(v["raw_data"]))
with requests_mock.Mocker() as m:
m.get(normal_distribution_url, content=normal_distribution_json.encode('utf-8'))
for k,v in zip_data.items():
m.get(k, content=bytes.fromhex(zip_data[k]["raw_data"]))
tek_bin = pCT.get_tek_content(k)
js_str = pCT.tek_toJson(tek_bin)
js = json.loads(js_str)
assert js["start_timestamp"] == datetime.fromtimestamp(tek_bin.start_timestamp).astimezone().isoformat()
assert js["end_timestamp"] == datetime.fromtimestamp(tek_bin.end_timestamp).astimezone().isoformat()
assert js["region"] == tek_bin.region
assert js["batch_num"] == tek_bin.batch_num
assert js["batch_size"] == tek_bin.batch_size
assert js["signature_infos"]["verification_key_version"] == tek_bin.signature_infos[0].verification_key_version
assert js["signature_infos"]["verification_key_id"] == tek_bin.signature_infos[0].verification_key_id
assert js["signature_infos"]["signature_algorithm"] == tek_bin.signature_infos[0].signature_algorithm
assert len(js["keys"]) == len(tek_bin.keys)
for k in js["keys"]:
assert k["key_data"] in [tk.key_data.hex() for tk in tek_bin.keys if k["key_data"] == tk.key_data.hex()]
assert k["transmission_risk_level"] in [tk.transmission_risk_level for tk in tek_bin.keys if k["key_data"] == tk.key_data.hex()]
assert k["rolling_start_interval_number"] in [tk.rolling_start_interval_number for tk in tek_bin.keys if k["key_data"] == tk.key_data.hex()]
assert k["rolling_period"] in [tk.rolling_period for tk in tek_bin.keys if k["key_data"] == tk.key_data.hex()]
@pytest.mark.parametrize("nocache", [True, False])
def test_tekjson_invalid_data_error(self, nocache, normal_distribution_url, normal_distribution_json, zip_data, invalid_zip_data):
pCT = probeCOCOATek(normal_distribution_url)
if not nocache:
for k,v in zip_data.items():
with open(os.path.join(pCT.cache_dir, os.path.basename(k)), 'wb') as f:
f.write(bytes.fromhex(v["raw_data"]))
with requests_mock.Mocker() as m:
m.get(normal_distribution_url, content=normal_distribution_json.encode('utf-8'))
for k,v in zip_data.items():
m.get(k, content=bytes.fromhex(zip_data[k]["raw_data"]))
tek_bin = pCT.get_tek_content(k)
with pytest.raises(DataError) as e:
js_str = pCT.tek_toJson(bytes.fromhex(invalid_zip_data))
| 55.809524
| 160
| 0.644767
|
9ab2ce981ba8cbdd84d9f32c35ad92fce8f6288b
| 1,166
|
py
|
Python
|
tests/philip_20190703_example_tree/UK_DWR/collect_sample.py
|
philip-brohan/rda-image-archive
|
acd03656dc313d9a68b318a0c92db07afda654c0
|
[
"MIT"
] | null | null | null |
tests/philip_20190703_example_tree/UK_DWR/collect_sample.py
|
philip-brohan/rda-image-archive
|
acd03656dc313d9a68b318a0c92db07afda654c0
|
[
"MIT"
] | null | null | null |
tests/philip_20190703_example_tree/UK_DWR/collect_sample.py
|
philip-brohan/rda-image-archive
|
acd03656dc313d9a68b318a0c92db07afda654c0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Collect a small sample of images from a UK DWR volume
# For testing the image database
import sys
import os
import glob
import subprocess
# Disk with original images
image_source='/glade/scratch/brohan/Image_disc_copy//Catherine_Ross_DWR/1903A/DWR_1903_10.pdf'
# Take the first n
n_images = 10
target_dir = "%s/%s" % (os.path.dirname(__file__),
'1903')
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
cmd = 'pdfseparate %s %s/page_%%03d.pdf' % (image_source,target_dir)
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
images = sorted(glob.glob("%s/page_*.pdf" % target_dir))
count = 0
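# Editorial note: the PDF-to-JPEG conversion below relies on macOS's 'sips' tool;
# on other platforms an equivalent converter (e.g. pdftoppm or ImageMagick) would
# be needed.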
for image in images:
if count<n_images:
cmd = "sips -s format jpeg %s --out %s" % (image,
image.replace('pdf','jpg'))
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
os.remove(image)
count += 1
| 26.5
| 94
| 0.609777
|
dc6cdd27954d4e3a8af09ec7390bb4fbd0600892
| 7,959
|
py
|
Python
|
tests/test_examples.py
|
aparamon/bokeh
|
cf6c97e35301adb15dd69eb24f4f8af5e75d64e1
|
[
"BSD-3-Clause"
] | 12
|
2020-07-20T14:58:31.000Z
|
2021-09-04T22:15:14.000Z
|
tests/test_examples.py
|
aparamon/bokeh
|
cf6c97e35301adb15dd69eb24f4f8af5e75d64e1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_examples.py
|
aparamon/bokeh
|
cf6c97e35301adb15dd69eb24f4f8af5e75d64e1
|
[
"BSD-3-Clause"
] | 3
|
2019-03-27T23:27:05.000Z
|
2020-08-05T19:03:19.000Z
|
from __future__ import absolute_import, print_function
import os
import time
import pytest
import subprocess
import platform
import signal
from os.path import basename, dirname, split
import six
from bokeh.server.callbacks import NextTickCallback, PeriodicCallback, TimeoutCallback
from bokeh._testing.util.screenshot import run_in_chrome
from bokeh.client import push_session
from bokeh.command.util import build_single_handler_application
from bokeh.util.terminal import info, fail, ok, red, warn, white
is_windows = platform.system() == "Windows"
pytest_plugins = (
"bokeh._testing.plugins.bokeh_server",
"bokeh._testing.plugins.examples_report",
)
@pytest.mark.examples
def test_js_examples(js_example, example, config, report):
if example.no_js:
if not config.option.no_js:
warn("skipping bokehjs for %s" % example.relpath)
else:
_run_in_browser(example, "file://%s" % example.path, config.option.verbose)
@pytest.mark.examples
def test_file_examples(file_example, example, config, report):
(status, duration, out, err) = _run_example(example)
info("Example run in %s" % white("%.3fs" % duration))
for line in out.split("\n"):
if len(line) == 0 or line.startswith("Wrote "):
continue
info(line, label="PY")
for line in err.split("\n"):
if len(line) == 0:
continue
warn(line, label="PY")
assert status != "timeout", "%s timed out" % example.relpath
assert status == 0, "%s failed to run (exit code %s)" % (example.relpath, status)
if example.no_js:
if not config.option.no_js:
warn("skipping bokehjs for %s" % example.relpath)
else:
_run_in_browser(example, "file://%s.html" % example.path_no_ext, config.option.verbose)
@pytest.mark.examples
def test_server_examples(server_example, example, config, report, bokeh_server):
# mitigate some weird interaction isolated to simple ids, py2.7,
# "push_session" server usage, and TravisCI
if six.PY2: os.environ['BOKEH_SIMPLE_IDS'] = 'no'
app = build_single_handler_application(example.path)
doc = app.create_document()
if six.PY2: del os.environ['BOKEH_SIMPLE_IDS']
# remove all next-tick, periodic, and timeout callbacks
for session_callback in doc.session_callbacks:
if isinstance(session_callback, NextTickCallback):
doc.remove_next_tick_callback(session_callback)
elif isinstance(session_callback, PeriodicCallback):
doc.remove_periodic_callback(session_callback)
elif isinstance(session_callback, TimeoutCallback):
doc.remove_timeout_callback(session_callback)
else:
raise RuntimeError('Unhandled callback type', type(session_callback))
session_id = basename(example.path)
push_session(doc, session_id=session_id)
if example.no_js:
if not config.option.no_js:
warn("skipping bokehjs for %s" % example.relpath)
else:
_run_in_browser(example, "http://localhost:5006/?bokeh-session-id=%s" % session_id, config.option.verbose)
def _get_path_parts(path):
parts = []
while True:
newpath, tail = split(path)
parts.append(tail)
path = newpath
if tail == 'examples':
break
parts.reverse()
return parts
def _print_webengine_output(result):
errors = result['errors']
messages = result['messages']
for message in messages:
level = message['level']
text = message['text']
url = message['url']
line = message['line']
col = message['col']
msg = "{%s} %s:%s:%s %s" % (level, url, line, col, text)
info(msg, label="JS")
for error in errors:
for line in error['text'].split("\n"):
fail(line, label="JS")
def _create_baseline(items):
lines = []
def descend(items, level):
for item in items:
type = item["type"]
bbox = item.get("bbox", None)
children = item.get("children", [])
line = "%s%s" % (" "*level, type)
if bbox is not None:
line += " bbox=[%s, %s, %s, %s]" % (bbox["left"], bbox["top"], bbox["width"], bbox["height"])
line += "\n"
lines.append(line)
descend(children, level+1)
descend(items, 0)
return "".join(lines)
def _run_in_browser(example, url, verbose=False):
start = time.time()
result = run_in_chrome(url)
end = time.time()
info("Example rendered in %s" % white("%.3fs" % (end - start)))
success = result["success"]
timeout = result["timeout"]
errors = result["errors"]
state = result["state"]
image = result["image"]
no_errors = len(errors) == 0
if timeout:
warn("%s %s" % (red("TIMEOUT:"), "bokehjs did not finish"))
if verbose:
_print_webengine_output(result)
assert success, "%s failed to load" % example.relpath
has_image = image is not None
has_state = state is not None
has_baseline = example.has_baseline
baseline_ok = True
if not has_state:
fail("no state data was produced for comparison with the baseline")
else:
new_baseline = _create_baseline(state)
example.store_baseline(new_baseline)
if not has_baseline:
fail("%s baseline doesn't exist" % example.baseline_path)
else:
result = example.diff_baseline()
if result is not None:
baseline_ok = False
fail("BASELINE DOESN'T MATCH (make sure to update baselines before running tests):")
for line in result.split("\n"):
fail(line)
example.store_img(image["data"])
ref = example.fetch_ref()
if not ref:
warn("reference image %s doesn't exist" % example.ref_url)
if example.no_diff:
warn("skipping image diff for %s" % example.relpath)
elif not has_image:
fail("no image data was produced for comparison with the reference image")
elif ref:
pixels = example.image_diff()
if pixels != 0:
comment = white("%.02f%%" % pixels) + " of pixels"
warn("generated and reference images differ: %s" % comment)
else:
ok("generated and reference images match")
assert no_errors, "%s failed with %d errors" % (example.relpath, len(errors))
assert has_state, "%s didn't produce state data" % example.relpath
assert has_baseline, "%s doesn't have a baseline" % example.relpath
assert baseline_ok, "%s's baseline differs" % example.relpath
def _run_example(example):
code = """\
__file__ = filename = '%s'
import random
random.seed(1)
import numpy as np
np.random.seed(1)
import warnings
warnings.filterwarnings("ignore", ".*", UserWarning, "matplotlib.font_manager")
with open(filename, 'rb') as example:
exec(compile(example.read(), filename, 'exec'))
""" % example.path.replace("\\", "\\\\")
cmd = ["python", "-c", code]
cwd = dirname(example.path)
env = os.environ.copy()
env['BOKEH_IGNORE_FILENAME'] = 'true'
env['BOKEH_RESOURCES'] = 'relative'
env['BOKEH_MINIFIED'] = 'false'
env['BOKEH_BROWSER'] = 'none'
class Timeout(Exception):
pass
if not is_windows:
def alarm_handler(sig, frame):
raise Timeout
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(20 if not example.is_slow else 60)
start = time.time()
try:
proc = subprocess.Popen(cmd, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
status = proc.wait()
except Timeout:
proc.kill()
status = 'timeout'
finally:
if not is_windows:
signal.alarm(0)
end = time.time()
out = proc.stdout.read().decode("utf-8")
err = proc.stderr.read().decode("utf-8")
return (status, end - start, out, err)
| 30.147727
| 114
| 0.633371
|
51a74116104251219db3ec4d78c429ac6e3973d1
| 23,632
|
py
|
Python
|
vaca/ipython_config.py
|
carrerasrodrigo/vaca
|
b8f6c6a038882cc7d786d80b959c2a2fdfaae689
|
[
"BSD-3-Clause"
] | null | null | null |
vaca/ipython_config.py
|
carrerasrodrigo/vaca
|
b8f6c6a038882cc7d786d80b959c2a2fdfaae689
|
[
"BSD-3-Clause"
] | null | null | null |
vaca/ipython_config.py
|
carrerasrodrigo/vaca
|
b8f6c6a038882cc7d786d80b959c2a2fdfaae689
|
[
"BSD-3-Clause"
] | null | null | null |
# Configuration file for ipython.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
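# Example (editorial, not part of the generated defaults): pre-import commonly
# used modules at startup, assuming they are installed in the active environment.
# c.InteractiveShellApp.exec_lines = ['import numpy as np', 'import pandas as pd']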
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# Reraise exceptions encountered loading IPython extensions?
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# BaseIPythonApplication configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# Whether to create profile dir if it doesn't exist
# c.BaseIPythonApplication.auto_create = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.BaseIPythonApplication.copy_config_files = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.BaseIPythonApplication.extra_config_file = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.BaseIPythonApplication.ipython_dir = ''
# Whether to overwrite existing config files when copying
# c.BaseIPythonApplication.overwrite = False
# The IPython profile to use.
# c.BaseIPythonApplication.profile = 'default'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.BaseIPythonApplication.verbose_crash = False
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
#------------------------------------------------------------------------------
# InteractiveShell configuration
#------------------------------------------------------------------------------
# An enhanced, interactive shell for Python.
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.InteractiveShell.ast_node_interactivity = 'last_expr'
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.InteractiveShell.ast_transformers = []
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.InteractiveShell.autocall = 0
# Autoindent IPython code entered interactively.
# c.InteractiveShell.autoindent = True
# Enable magic commands to be called without the leading %.
# c.InteractiveShell.automagic = True
# The part of the banner to be printed before the profile
# c.InteractiveShell.banner1 = 'Python 3.5.1 (default, May 12 2016, 19:06:45) \nType "copyright", "credits" or "license" for more information.\n\nIPython 4.2.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# The part of the banner to be printed after the profile
# c.InteractiveShell.banner2 = ''
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.InteractiveShell.cache_size = 1000
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.InteractiveShell.color_info = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.InteractiveShell.colors = 'Linux'
#
# c.InteractiveShell.debug = False
# **Deprecated**
#
# Will be removed in IPython 6.0
#
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). `deep_reload`
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.InteractiveShell.deep_reload = False
# Don't call post-execute functions that have failed in the past.
# c.InteractiveShell.disable_failing_post_execute = False
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.InteractiveShell.display_page = False
#
# c.InteractiveShell.history_length = 10000
# The number of saved history entries to be loaded into the readline buffer at
# startup.
# c.InteractiveShell.history_load_length = 1000
#
# c.InteractiveShell.ipython_dir = ''
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.InteractiveShell.logappend = ''
# The name of the logfile to use.
# c.InteractiveShell.logfile = ''
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.InteractiveShell.logstart = False
# Save multi-line entries as one entry in readline history
# c.InteractiveShell.multiline_history = True
#
# c.InteractiveShell.object_info_string_level = 0
# Automatically call the pdb debugger after every exception.
# c.InteractiveShell.pdb = False
# Deprecated, will be removed in IPython 5.0, use PromptManager.in_template
# c.InteractiveShell.prompt_in1 = 'In [\\#]: '
# Deprecated, will be removed in IPython 5.0, use PromptManager.in2_template
# c.InteractiveShell.prompt_in2 = ' .\\D.: '
# Deprecated, will be removed in IPython 5.0, use PromptManager.out_template
# c.InteractiveShell.prompt_out = 'Out[\\#]: '
# Deprecated, will be removed in IPython 5.0, use PromptManager.justify
# c.InteractiveShell.prompts_pad_left = True
#
# c.InteractiveShell.quiet = False
#
# c.InteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
#
# c.InteractiveShell.readline_remove_delims = '-/~'
#
# c.InteractiveShell.readline_use = True
#
# c.InteractiveShell.separate_in = '\n'
#
# c.InteractiveShell.separate_out = ''
#
# c.InteractiveShell.separate_out2 = ''
# Show rewritten input, e.g. for autocall.
# c.InteractiveShell.show_rewritten_input = True
#
# c.InteractiveShell.wildcards_case_sensitive = True
#
# c.InteractiveShell.xmode = 'Context'
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vi'
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
#
# c.PromptManager.color_scheme = 'Linux'
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
#------------------------------------------------------------------------------
# HistoryAccessorBase configuration
#------------------------------------------------------------------------------
# An abstract class for History Accessors
#------------------------------------------------------------------------------
# HistoryAccessor configuration
#------------------------------------------------------------------------------
# Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryAccessor.connection_options = {}
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryAccessor.enabled = True
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryAccessor.hist_file = ''
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# BaseFormatter configuration
#------------------------------------------------------------------------------
# A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format an given object.
#
# 1. The object is introspected to see if it has a method with the name
# :attr:`print_method`. If is does, that object is passed to that method
# for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
# to find print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
#
# c.BaseFormatter.deferred_printers = {}
#
# c.BaseFormatter.enabled = True
#
# c.BaseFormatter.singleton_printers = {}
#
# c.BaseFormatter.type_printers = {}
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
#
# c.PlainTextFormatter.float_precision = ''
# Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# c.PlainTextFormatter.max_seq_length = 1000
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.verbose = False
#------------------------------------------------------------------------------
# Completer configuration
#------------------------------------------------------------------------------
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.Completer.greedy = False
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
#------------------------------------------------------------------------------
# Magics configuration
#------------------------------------------------------------------------------
# Base class for implementing magic functions.
#
# Shell functions which can be reached as %function_name. All magic functions
# should accept a string, which they can parse for their own needs. This can
# make some functions easier to type, eg `%cd ../` vs. `%cd("../")`
#
# Classes providing magic functions need to subclass this class, and they MUST:
#
# - Use the method decorators `@line_magic` and `@cell_magic` to decorate
# individual methods as magic functions, AND
#
# - Use the class decorator `@magics_class` to ensure that the magic
# methods are properly registered at the instance level upon instance
# initialization.
#
# See :mod:`magic_functions` for examples of actual implementation classes.
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
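# Example (editorial): expose `%%ruby` and `%%node` cell magics, assuming those
# interpreters are available on the PATH.
# c.ScriptMagics.script_magics = ['ruby', 'node']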
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
import os
import vaca
v = vaca.Vaca(config_file=os.environ['VACA_CONNECTION_CONFIG'],
connection_name=os.environ.get('VACA_DEFAULT_CONNECTION', 'default'))
print('\n')
print(" (__) / \ ")
print(" (oo) ( muuuuuuuuuuuu! )")
print(" /-------\/ --'\________________________/ ")
print(" / | ||")
print(" * ||----|| ")
print(" ^^ ^^ ")
print(" Vaca ")
print('*' * 50)
print('\nVaca loaded, please use v.q(...\n')
print('*' * 50)
| 37.69059
| 409
| 0.633125
|
350501b67a112814d031498be6698325b7708214
| 5,320
|
py
|
Python
|
ros/src/util/packages/autoware_bag_tools/scripts/change_frame_id.py
|
GeoBIMpro/MAS
|
60dc4745efb86fc1796e672f5825fa9c54940919
|
[
"BSD-3-Clause"
] | 5
|
2018-06-20T08:29:21.000Z
|
2018-11-12T06:05:52.000Z
|
ros/src/util/packages/autoware_bag_tools/scripts/change_frame_id.py
|
donrv/MAS
|
60dc4745efb86fc1796e672f5825fa9c54940919
|
[
"BSD-3-Clause"
] | null | null | null |
ros/src/util/packages/autoware_bag_tools/scripts/change_frame_id.py
|
donrv/MAS
|
60dc4745efb86fc1796e672f5825fa9c54940919
|
[
"BSD-3-Clause"
] | 1
|
2019-01-21T13:38:45.000Z
|
2019-01-21T13:38:45.000Z
|
#!/usr/bin/python
"""
Copyright (c) 2018, Nagoya University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Autoware nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
v1.0 Jacob Lambert 2018-05-30
Copyright (c) 2012,
Systems, Robotics and Vision Group
University of the Balearican Islands
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Systems, Robotics and Vision Group, University of
the Balearican Islands nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
PKG = 'autoware_bag_tools' # this package name
import roslib; roslib.load_manifest(PKG)
import rospy
import rosbag
import os
import sys
import argparse
import platform
def change_frame_id(inbag,outbag,frame_ids,topics):
rospy.loginfo(' Processing input bagfile: %s', inbag)
rospy.loginfo(' Writing to output bagfile: %s', outbag)
rospy.loginfo(' Changing topics: %s', topics)
rospy.loginfo(' Writing frame_ids: %s', frame_ids)
outbag = rosbag.Bag(outbag,'w')
for topic, msg, t in rosbag.Bag(inbag,'r').read_messages():
if topic in topics:
if msg._has_header:
if len(frame_ids) == 1:
msg.header.frame_id = frame_ids[0]
else:
idx = topics.index(topic)
msg.header.frame_id = frame_ids[idx]
outbag.write(topic, msg, t)
rospy.loginfo('Closing output bagfile and exit...')
outbag.close()
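# Example invocation (editorial; bag, topic, and frame names are illustrative only):
#   ./change_frame_id.py -i input.bag -o output.bag -t /points_raw -f velodyne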
if __name__ == "__main__":
rospy.init_node('change_frame_id')
parser = argparse.ArgumentParser(
description='Create a new bagfile from an existing one replacing the frame ids of requested topics.')
parser.add_argument('-o', metavar='OUTPUT_BAGFILE', required=True, help='output bagfile')
parser.add_argument('-i', metavar='INPUT_BAGFILE', required=True, help='input bagfile')
parser.add_argument('-f', metavar='FRAME_ID', required=True, help='desired frame_ids name in the topics. If there is '
'one frame ID, all topics are changed to that ID. '
'If there is more than one frame ID, one topic per '
'frame ID is expected.', nargs='+')
parser.add_argument('-t', metavar='TOPIC', required=True, help='topic(s) to change', nargs='+')
args = parser.parse_args()
# Check
if len(args.f) != 1 and len(args.f) != len(args.t):
raise ValueError("Number of frame IDs given must be 1 or equal to the number of topics. Aborting")
try:
change_frame_id(args.i,args.o,args.f,args.t)
  except Exception:
import traceback
traceback.print_exc()
| 46.666667
| 120
| 0.729699
|
f57f9dc71be08487f6b0b6bcf4706776452e6c6c
| 3,998
|
py
|
Python
|
survol/sources_types/CIM_DataFile/portable_executable/pefile_exports.py
|
AugustinMascarelli/survol
|
7a822900e82d1e6f016dba014af5741558b78f15
|
[
"BSD-3-Clause"
] | null | null | null |
survol/sources_types/CIM_DataFile/portable_executable/pefile_exports.py
|
AugustinMascarelli/survol
|
7a822900e82d1e6f016dba014af5741558b78f15
|
[
"BSD-3-Clause"
] | null | null | null |
survol/sources_types/CIM_DataFile/portable_executable/pefile_exports.py
|
AugustinMascarelli/survol
|
7a822900e82d1e6f016dba014af5741558b78f15
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
Pefile exports
"""
# BEWARE: Do NOT rename it as stat.py otherwise strange errors happen,
# probably a collision of modules names, with the message:
# "Fatal Python error: Py_Initialize: can't initialize sys standard streams"
import os
import sys
import time
import lib_util
import lib_uris
import lib_common
import lib_properties
from lib_properties import pc
# This can work only on Windows and with exe files.
import pefile
import lib_pefile
#Usable = lib_util.UsableWindowsBinary
def pefileDecorate( grph, rootNode, pe ):
for fileinfo in pe.FileInfo:
if fileinfo.Key == 'StringFileInfo':
for st in fileinfo.StringTable:
for entry in st.entries.items():
#UnicodeEncodeError: 'ascii' codec can't encode character u'\xa9' in position 16: ordinal not in range(128)
# sys.stderr.write("%s %s\n"% (entry[0], entry[1]) )
key = entry[0]
val = entry[1]
if val is None:
val = "None"
else:
val = val.encode("ascii", errors="replace")
# val = val.encode("utf-8", errors="replace")
# val = val[:2]
# sys.stderr.write("%s %s\n"% (key,val) )
grph.add( ( rootNode, lib_common.MakeProp(key), lib_common.NodeLiteral(val) ) )
return
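# Editorial note: recent releases of the 'pefile' library expose PE.FileInfo as a
# list of lists, so the loop above may need one more level of iteration; the code
# here assumes the older flat layout.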
def Main():
cgiEnv = lib_common.CgiEnv()
filNam = cgiEnv.GetId()
DEBUG("filNam=%s", filNam )
filNode = lib_common.gUriGen.FileUri(filNam )
try:
pe = pefile.PE(filNam)
except Exception:
exc = sys.exc_info()[1]
lib_common.ErrorMessageHtml("File: %s. Exception:%s:" % ( filNam, str(exc)))
# sys.stderr.write("%s\n" % hex(pe.VS_VERSIONINFO.Length) )
# sys.stderr.write("%s\n" % hex(pe.VS_VERSIONINFO.Type) )
# sys.stderr.write("%s\n" % hex(pe.VS_VERSIONINFO.ValueLength) )
# sys.stderr.write("%s\n" % hex(pe.VS_FIXEDFILEINFO.Signature) )
# sys.stderr.write("%s\n" % hex(pe.VS_FIXEDFILEINFO.FileFlags) )
# sys.stderr.write("%s\n" % hex(pe.VS_FIXEDFILEINFO.FileOS) )
# for fileinfo in pe.FileInfo:
# if fileinfo.Key == 'StringFileInfo':
# for st in fileinfo.StringTable:
# for entry in st.entries.items():
# #UnicodeEncodeError: 'ascii' codec can't encode character u'\xa9' in position 16: ordinal not in range(128)
# # sys.stderr.write("%s %s\n"% (entry[0], entry[1]) )
# key = entry[0]
# val = entry[1]
# key = key
# if val is None:
# val = "None"
# else:
# val = val.encode("ascii", errors="replace")
# # val = val.encode("utf-8", errors="replace")
# # val = val[:2]
# sys.stderr.write("%s %s\n"% (key,val) )
# elif fileinfo.Key == 'VarFileInfo':
# for var in fileinfo.Var:
# sys.stderr.write('%s: %s\n' % var.entry.items()[0] )
#
# If the PE file was loaded using the fast_load=True argument, we will need to parse the data directories:
# pe.parse_data_directories()
grph = cgiEnv.GetGraph()
try:
propForward = lib_common.MakeProp("Forward")
propAddress = lib_common.MakeProp("Address")
propOrdinal = lib_common.MakeProp("Ordinal")
for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols:
# sys.stderr.write("\t%s %s %d\n"% ( hex(pe.OPTIONAL_HEADER.ImageBase + exp.address), exp.name, exp.ordinal ) )
decodedSymNam = lib_pefile.UndecorateSymbol(exp.name)
symNode = lib_uris.gUriGen.SymbolUri( decodedSymNam, filNam )
grph.add( ( filNode, pc.property_symbol_defined, symNode ) )
forward = exp.forwarder
if not forward:
forward = ""
grph.add( ( symNode, propForward, lib_common.NodeLiteral(forward) ) )
grph.add( ( symNode, propAddress, lib_common.NodeLiteral(hex(exp.address)) ) )
grph.add( ( symNode, propOrdinal, lib_common.NodeLiteral(hex(exp.ordinal)) ) )
# grph.add( ( symNode, lib_common.MakeProp("Rest"), lib_common.NodeLiteral(dir(exp)) ) )
except Exception:
exc = sys.exc_info()[1]
lib_common.ErrorMessageHtml("File: %s. Exception:%s:" % ( filNam, str(exc)))
# cgiEnv.OutCgiRdf()
# cgiEnv.OutCgiRdf("LAYOUT_TWOPI")
cgiEnv.OutCgiRdf("LAYOUT_RECT",[pc.property_symbol_defined])
if __name__ == '__main__':
Main()
| 33.596639
| 114
| 0.674087
|
1fd9df89bcd483b2762da29e519cc88759b533c6
| 98,386
|
py
|
Python
|
QUEEN/qobj.py
|
yachielab/QUEEN
|
94ed667dbb3b95712e5a2473afc889d625c7824a
|
[
"MIT"
] | 9
|
2021-11-09T23:25:50.000Z
|
2021-12-31T23:53:08.000Z
|
QUEEN/qobj.py
|
yachielab/QUEEN
|
94ed667dbb3b95712e5a2473afc889d625c7824a
|
[
"MIT"
] | null | null | null |
QUEEN/qobj.py
|
yachielab/QUEEN
|
94ed667dbb3b95712e5a2473afc889d625c7824a
|
[
"MIT"
] | null | null | null |
import sys
import copy
import urllib
import tempfile
import requests
from bs4 import BeautifulSoup
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation, CompoundLocation, FeatureLocation, ExactPosition
from functools import total_ordering
sys.path.append("/".join(__file__.split("/")[:-1]))
from qfunction import *
from quine import *
from qint import Qint
from qseq import Qseq
def _combine_history(dna, history_features):
history_feature = SeqFeature(FeatureLocation(0, len(dna.seq), strand=0), type="source")
history_feature = history_features[0].__class__(history_feature, subject=dna.seq)
for feat in history_features:
for key in feat.qualifiers:
if "building_history" in key and feat.qualifiers[key] not in history_feature.qualifiers.values():
history_feature.qualifiers[key] = feat.qualifiers[key]
return history_feature
@total_ordering
class DNAfeature(SeqFeature):
"""DNA feature object
Each DNAfeature object with the following attributes provides an annotation for a
given range of DNA sequence in a QUEEN object.
"""
def __deepcopy__(self, memo):
obj = DNAfeature(self, subject=self.subject)
return obj
def __getattribute__(self, name):
if name == "strand":
return self.location.strand if self.location.strand is not None else 0
else:
return super().__getattribute__(name)
def __getattr__(self, name):
if name == "feature_id":
if "_id" in self.__dict__:
return self._id
else:
return None
elif name == "feature_type":
return self.type
elif name == "_original":
seq = self.subject.printsequence(self.start, self.end, self.location.strand if self.location.strand !=0 else 1)
elif name == "original":
if "_original" in self.__dict__:
return self._original
else:
return self.subject.printsequence(self.start, self.end, self.location.strand if self.location.strand !=0 else 1)
elif name == "seq" or name == "sequence":
seq = self.subject.printsequence(self.start, self.end, self.location.strand if self.location.strand !=0 else 1)
seq = Qseq(seq)
seq.qkey = self._start.qkey
seq.parental_id = self._start.parental_id
seq.parent = self
seq.parental_class = "DNAfeature"
seq = self.subject.printsequence(self.start, self.end, self.location.strand if self.location.strand !=0 else 1)
return seq
elif name == "strand":
return self.location.strand if self.location.strand is not None else 0
elif name == "start":
return self._start
elif name == "end":
return self._end
elif name == "span":
return (self.start, self.end)
else:
raise AttributeError("DNAfeature obejct has no attribute '{}'".format(name))
def __eq__(self, other):
if not isinstance(other, DNAfeature):
return NotImplemented
else:
if self.qualifiers == other.qualifiers and (self.subject is not None and other.subject is not None and self.seq == other.seq):
return True
else:
return False
def __lt__(self, other):
return NotImplemented
def __setattr__(self, key, value):
if key in ["feature_id", "feature_type", "seq", "sequenece", "original", "strand", "start", "end", "span"]:
raise AttributeError("'DNAfeature' object attribute '{}' is read-only".format(key))
else:
super.__setattr__(self, key, value)
def __init__(self, feature=None, location=None, type="misc_feature", subject=None, query=None):
if feature is None:
SeqFeature.__init__(self, location, type)
else:
for key in feature.__dict__:
if key in ["_start", "_end", "__digestion_topl", "_digestion_topr", "_digestion_bottomr", "_digestion_bottoml", "subject", "query"]:
pass
else:
if key == "_original":
self._original = str(feature._original)
else:
self.__dict__[key] = copy.deepcopy(feature.__dict__[key])
#start->end direction should be 5' to 3' on the top strand.
if self.location.strand == -1:
self._start = self.location.parts[-1].start.position
self._end = self.location.parts[0].end.position
else:
self._start = self.location.parts[0].start.position
self._end = self.location.parts[-1].end.position
self._qkey = None #ID for features_dict
self._second_id = None #ID for a single feature
self._start = Qint(self._start)
self._end = Qint(self._end)
self._start.parent = self
self._end.parent = self
self._start.name = "start"
self._end.name = "end"
if subject is None:
pass
else:
self._digestion_topl = "null"
self._digestion_topr = "null"
self._digestion_bottoml = "null"
self._digestion_bottomr = "null"
self.subject = subject
self.query_seq = query
self._dnafeature = 1
def set_position(self, position, attribute):
if self.subject.topology == "linear" and position[0] > position[1] and attribute == "start":
raise ValueError("'start' value should be smaller than 'end' value. First, please change 'end' value")
elif self.subject.topology == "linear" and position[0] > position[1] and attribute =="end":
raise ValueError("'end' value should be larger than 'start' value. First, please change 'start' value")
strand = self.location.strand
if type(position[0]) is int and type(position[1]) is int:
s = position[0]
e = position[1]
if s < e:
location = FeatureLocation(s, e, strand)
else:
location = CompoundLocation([FeatureLocation(s, len(self.subject.seq)), FeatureLocation(0, e, strand)])
self.location = location
elif type(position[0]) is list and type(position[1]) is list:
locations = []
strand = self.location.strand
            for s, e in zip(position[0], position[1]):
if s < e:
location = FeatureLocation(s, e, strand)
locations.append(location)
else:
loc1 = FeatureLocation(s, len(self.subject.seq), strand)
loc2 = FeatureLocation(0, e, strand)
locations.append(loc1)
locations.append(loc2)
if strand == -1:
locations.reverse()
            self.location = CompoundLocation(locations, type=self.type)
if self.location.strand == -1:
self._start = self.location.parts[-1].start.position
self._end = self.location.parts[0].end.position
else:
self._start = self.location.parts[0].start.position
self._end = self.location.parts[-1].end.position
self._start = Qint(self._start)
self._end = Qint(self._end)
self._start.name = "start"
self._end.name = "end"
class QUEEN():
"""QUEEN object
The QUEEN class can define a dsDNA object with sequence annotations. It can be
created by specifying a DNA sequence or importing a sequence data file in
GenBank or FASTA file format (single sequence entry). When a GenBank format file
is imported, its NCBI accession number, Addgene plasmid ID, or Benchling share
link can be provided instead of downloading the file to your local environment.
"""
    # Class variables that manage the execution histories of operational and search functions
dna_dict = {}
queried_feature_dict = {}
queried_features_dict = {}
queried_features_name_dict = {}
process_description = None
_namespace = {}
_products = {}
_processes = {}
_namespaceflag = 0
_num_history = 1
_qnum = 0
_keep = 1
_source = None
_project = None
def set_project(value=None):
QUEEN._project = value
def _get_genbank(_id, dbtype="ncbi"):
"""
        Dbtype can be selected from "ncbi", "addgene", "benchling", and "googledrive".
        For "ncbi", set an NCBI accession number.
        For "addgene", set a plasmid ID. Sometimes different full sequence maps are
        provided by the depositor and Addgene, respectively, for a single plasmid.
        In this case, please specify the plasmid ID followed by "addgene" or
        "depositor" (Ex. 50005:addgene or 50005:depositor). If you set only a plasmid ID,
        the value will be interpreted as "plasmidID:addgene".
        For "benchling", set a Benchling shared link.
        For "googledrive", set a Google Drive sharing link to a GenBank file.
"""
outb = io.BytesIO()
outs = io.StringIO()
headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0"}
if dbtype == "ncbi":
url = "https://www.ncbi.nlm.nih.gov/sviewer/viewer.cgi?tool=portal&save=file&log$=seqview&db=nuccore&report=gbwithparts&id={}&withparts=on".format(_id)
elif dbtype == "addgene":
if ":" not in _id:
_id = _id + ":addgene"
site = "https://www.addgene.org/{}/sequences/".format(_id.split(":")[0])
html = requests.get(site)
soup = BeautifulSoup(html.content, "html.parser")
url = soup.find(id="{}-full".format(_id.split(":")[1])).find(class_="genbank-file-download").get("href")
elif dbtype == "benchling":
url = _id + ".gb"
elif dbtype == "googledrive":
            match = re.search("https://drive\.google\.com/file/d/(.+)/view\?usp=sharing", _id)
fileid = match.group(1)
url = "https://drive.google.com/uc?export=download&id=" + fileid
else:
raise ValueError("'datatype' can take only one of 'ncbi,' 'addgeen,' and 'benchling.'")
request = urllib.request.Request(url, headers=headers)
with urllib.request.urlopen(request) as u:
outb.write(u.read())
outs.write(outb.getvalue().decode())
return outs
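    # Illustrative usage (editorial, not part of the original file); the accession
    # number below is only an example:
    #   handle = QUEEN._get_genbank("NC_000913.3", dbtype="ncbi")
    #   genbank_text = handle.getvalue()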
def _check_seq(seq):
top, bottom = seq.split("/")
new_seq = ""
new_top = ""
new_bottom = ""
if len(top) != len(bottom):
return False, False, False
for t,b in zip(top,bottom):
if t != b.translate(str.maketrans("ATGCRYKMSWBDHV","TACGYRMKWSVHDB")) and (t != "-" and b != "-"):
return False, False, False
else:
new_top += t
new_bottom += b
if t == "-":
new_seq += b.translate(str.maketrans("ATGCRYKMSWBDHV","TACGYRMKWSVHDB"))
else:
new_seq += t
return new_top, new_bottom, new_seq
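    # Illustrative example (editorial): for a valid top/bottom strand pair the two
    # strands and the reconstructed sequence are returned, e.g.
    #   QUEEN._check_seq("ATGC/TACG")  ->  ("ATGC", "TACG", "ATGC")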
def __deepcopy__(self, memo):
obj = QUEEN(seq=self.seq, _direct=0)
for key in self.__dict__:
if key == "seq":
pass
elif key == "_history_feature":
if self._history_feature is None:
obj._history_feature = None
else:
obj._history_feature = DNAfeature(self._history_feature, subject=obj)
elif key == "_dnafeatures":
feats = []
for feat in self.dnafeatures:
feats.append(DNAfeature(feat, subject=obj))
obj._dnafeatures = feats
else:
obj.__dict__[key] = copy.copy(self.__dict__[key])
return obj
def __eq__(self, other):
if not isinstance(other, QUEEN):
return NotImplemented
else:
if self.seq == other.seq:
pass
else:
return False, 1
if self.topology == "linear" and self.topology == other.topology:
if self._left_end == other._left_end and self._left_end_top == other._left_end_top and self._left_end_bottom == other._left_end_bottom:
pass
else:
return False, 2
if self._right_end == other._right_end and self._right_end_top == other._right_end_top and self._right_end_bottom == other._right_end_bottom:
pass
else:
return False, 3
if len(self.dnafeatures) == len(other.dnafeatures):
flag = 0
for feat1, feat2 in zip(self.dnafeatures, other.dnafeatures):
if feat1.type == "source" or feat2.type == "source":
pass
else:
if feat1 == feat2:
flag = 1
else:
flag = 0
if flag == 1:
return True
if flag == 0:
for feat1 in self.dnafeatures:
flag = 0
for feat2 in other.dnafeatures:
if feat1 == feat2:
flag = 1
break
else:
pass
if flag == 0:
return False, 4
else:
pass
return True
else:
return False, 5
def __repr__(self):
if len(self.seq) > 50:
out = "<queen.QUEEN object; project='{}', length='{} bp', topology='{}'>".format(self.project, len(self.seq), self.topology)
else:
out = "<queen.QUEEN object; project='{}', length='{} bp', sequence='{}', topology='{}'>".format(self.project, len(self.seq), self.seq, self.topology)
return out
def __setattr__(self, key, value):
if key == "_unique_id":
if "_unique_id" in self.__dict__:
if value in self.__class__.dna_dict:
if value.split("_")[-1].isdecimal() == True:
value = "_".join(value.split("_")[:-1])
unique = 0
while value + "_" + str(unique) in self.__class__.dna_dict:
unique += 1
_unique_id = value + "_" + str(unique)
else:
_unique_id = value
QUEEN.dna_dict[_unique_id] = None
super.__setattr__(self, "_unique_id", _unique_id)
else:
QUEEN.dna_dict[value] = None
super.__setattr__(self, "_unique_id", value)
elif key == "_product_id":
if value in self.__class__._products:
if value.split("_")[-1].isdecimal() == True:
value = "_".join(value.split("_")[:-1])
unique = 0
while value + "_" + str(unique) in self.__class__._products:
unique += 1
_product_id = value + "_" + str(unique)
else:
_product_id = value
self.__class__._products[_product_id] = None
super.__setattr__(self, "_product_id", _product_id)
elif key in ("seq", "rcseq", "dnafeatures", "productdict", "prcessdict", "project", "topology"):
raise AttributeError("'QUEEN' object attribute '{}' is read-only".format(key))
#raise ValueError("Cannot assign to '{}' attribute.".format(key))
else:
super.__setattr__(self, key, value)
def __getattribute__(self, name):
if name == "features_dict":
return dict(list(map(lambda x:(x._id, x), self.dnafeatures)))
elif name == "_seq":
qseq = Qseq(super().__getattribute__(name))
if "_product_id" in self.__dict__:
qseq.parental_id = self._product_id
elif "_unique_id" in self.__dict__:
qseq.parental_id = self._unique_id
qseq.parent = self
qseq.parental_class = "QUEEN"
return qseq
else:
return super().__getattribute__(name)
def __getattr__(self, name):
if name == "history":
histories = []
for key in self._history_feature.qualifiers:
if "building_history" in key[0:18]:
history = self._history_feature.qualifiers[key]
histories.append((int(key.split("_")[-1]), history[0], history[1], history[2]))
histories.sort()
return histories
elif name == "seq":
return self._seq
elif name == "topology":
return self._topology
elif name == "rcseq":
rcseq = self._seq.upper().translate(str.maketrans("ATGCRYKMSWBDHV","TACGYRMKWSVHDB"))[::-1]
rcseq = Qseq(rcseq)
if "_product_id" in self.__dict__:
rcseq.parental_id = self._product_id
elif "_unique_id" in self.__dict__:
rcseq.parental_id = self._unique_id
rcseq.parent = self
rcseq.parental_class = "QUEEN"
rcseq.name = "rcseq"
return rcseq
elif name == "project":
return self._product_id
elif name == "sequence":
return self._seq
elif name == "dnafeatures":
return self._dnafeatures
elif name == "productdict":
if self._load_history == 1 or self._load_history == -1:
pass
elif self._load_history == 0:
try:
quine(self, execution=True, _io=False)
except:
pass
return dict(zip(self._productids, [QUEEN._products[key] for key in self._productids]))
elif name == "processdict":
return dict(zip(self._processids, [QUEEN._processes[key] for key in self._processids]))
else:
raise AttributeError("QUEEN obejct has no attribute '{}'".format(name))
def __init__(self, seq=None, record=None, fileformat=None, dbtype="local", topology="linear", ssdna=False, import_history=True, setfeature=False, project=None, product=None, process_name=None, process_description=None,
pd=None, pn=None, process_id=None, original_ids=[], _sourcefile=None, quinable=True, _direct=1):
"""
Parameters
----------
seq : str
DNA sequence of `QUEEN_object` to be generated.
record : str
Source GenBank or FASTA file path of `QUEEN_object` to be generated.
When a GenBank format file is imported, its NCBI accession number, Addgene
plasmid ID, or Benchling share link can be provided instead of downloading
the file to your local environment.
fileformat : str ("fasta" or "genbank"), default: None
If the value is specified, the file given via `record` is interpreted as
`fileformat`. Otherwise, the file format is automatically detected based on
its file name.
dbtype : str ("local", "NCBI", "addgene", or "benchling")
Online database location of the GenBank file to be imported.
topology : str ("linear" or "circular")
Sequence topology. When a `QUEEN_object` is created by loading from a GenBank
file, the topology is set according to the description in the GenBank file.
ssdna : bool, default: False
If True, the QUEEN object will be handled as ssDNA. A ssDNA QUEEN object cannot
have its ends modified or be joined with a dsDNA QUEEN object. By annealing
ssDNA QUEEN objects through the `joindna` function, a dsDNA QUEEN object can be generated.
import_history : bool, default: True
If False, it disables the inheritance of the operational process histories of
previously generated `QUEEN_objects` to a newly produced `QUEEN_object`.
product : str
This parameter enables users to provide label names for the produced `QUEEN_objects`.
The provided labels are stored in `QUEEN_objects.project`.
If the value is not specified and a `QUEEN_object` is created from a GenBank or
FASTA file, its sequence ID will be inherited here.
Attributes
----------
project : str
Project name of `QUEEN_object` construction. In QUEEN, this property is also used
as a dictionary key to access the `.productdict` described below.
If a `QUEEN_object` is created from a GenBank or FASTA file, its sequence ID will
be inherited here. Otherwise, the project name is automatically generated or
defined based on the `product` value to be unique amongst the existing
`.productdict` keys.
seq : str
Top strand sequence (5′→3′). This property cannot be directly edited; only the
built-in operational functions of QUEEN described below can edit this property.
rcseq : str
Bottom strand sequence (5′→3′). This property cannot be directly edited; only the
built-in operational functions of QUEEN described below can edit this property.
topology : str ("linear" or "circular")
Sequence topology. When a `QUEEN_object` is created by loading from a GenBank file,
the topology is set according to the description in the GenBank file.
Only the built-in operational functions of QUEEN described below can edit this
property.
dnafeatures : list of `DNAfeature_objects`
When a `QUEEN_object` is loaded from a GenBank file, `.dnafeatures` will
automatically be generated from the GenBank file's sequence features.
Otherwise,`.dnafeatures` will be an empty list. Each `DNAfeature_object` with the
following attributes provides an annotation for a given range of DNA sequence in
a `QUEEN_object`.
- feature_id : str
Unique identifier. It is automatically determined to each feature when a `QUEEN_object`
is loaded from a GenBank file.
- feature_type : str
Biological nature. Any value is acceptable. The GenBank format requires registering a
biological nature for each feature.
- start : int
Start position of `DNAfeature_object` in `QUEEN_object`.
- end : int
End position of `DNAfeature_object` in `QUEEN_object`.
- strand : int (1 or -1)
Direction of `DNAfeature_object` in `QUEEN_object`.
Top strand (`1`) or bottom strand (`-1`).
- sequence : str
Sequence of the `DNAfeature_object` for its encoded direction.
- qualifiers : dict
Qualifiers. When a GenBank file is imported, qualifiers of each feature will be
registered here. Qualifier names and values will serve as dictionary keys and
values, respectively.
`DNAfeature_object` can be edited only by the `editfeature()` function described
below. DNAfeature class is implemented as a subclass of the Biopython SeqFeature
class. Therefore, apart from the above attributes, DNAfeature class inherits all
the attributes and methods of SeqFeature class. For details about SeqFeature class,
see (https://biopython.org/docs/dev/api/Bio.SeqFeature.html)
productdict : dict
Dictionary for all of the inherited `QUEEN_objects` used to construct the present
`QUEEN_object`. The `.project` of each `QUEEN_object` serves as a key of this
dictionary.
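Examples
--------
A minimal usage sketch; the sequences and file names below are hypothetical and
only illustrate the call signatures described above:
plasmid  = QUEEN(record="pUC19.gbk", topology="circular")  #load a local GenBank file
fragment = QUEEN(seq="ATGCGTACGTTAGC")                     #build directly from a sequence string
ssoligo  = QUEEN(seq="ATGCATGCATGC", ssdna=True)           #single-stranded oligo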
"""
if ssdna == True and topology == "circular":
raise TypeError("The parameters of 'topology=circulr' and 'ssdna=True' cannot be specified simultaneously.")
fseq = seq
frecord = record
fdbtype = dbtype
fproject = project
ftopology = topology
fproduct = product
project = project if product is None else product
process_name = pn if process_name is None else process_name
process_description = pd if process_description is None else process_description
self._seq = None
self.record = None
self._dnafeatures = None
self._right_end = None
self._left_end = None
self._history_feature = None
self._ssdna = ssdna
self._productids = []
self._processids = []
if seq is None and record is None:
if project is None:
project = "dna"
self._topology = topology
if "_" in project:
project = project.replace("_","-")
self._left_end_top = 1
self._left_end_bottom = 1
self._right_end_top = 1
self._right_end_bottom = 1
elif seq is None or "." in seq:
if "." in str(seq):
record = seq
if type(record) == str:
if dbtype == "local":
if fileformat != None:
fmt = fileformat
else:
if record.split(".")[-1] in ["gb","gbk","genbank"]:
fmt = "genbank"
elif record.split(".")[-1] in ["fasta","fna","fa","faa"]:
fmt = "fasta"
else:
fmt = "genbank"
record = SeqIO.parse(record,fmt)
record = next(record)
elif dbtype in ("ncbi", "addgene", "benchling", "googledrive"):
fmt = "genbank"
record = QUEEN._get_genbank(record, dbtype)
with tempfile.TemporaryFile(mode="w+") as o:
content = record.getvalue()
o.write(content)
o.seek(0)
record = SeqIO.parse(o,fmt)
record = next(record)
elif type(record) == SeqRecord:
record = record
else:
record = SeqIO.parse(record,None)
self._seq = str(record.seq).upper()
self.record = record
if "topology" in record.annotations:
self._topology = record.annotations["topology"]
else:
self._topology = topology
if ssdna == False:
self._right_end = ""
self._left_end = ""
self._left_end_top = 1
self._left_end_bottom = 1
self._right_end_top = 1
self._right_end_bottom = 1
else:
self._right_end = self._seq
self._left_end = self._seq
self._left_end_top = 1
self._left_end_bottom = -1
self._right_end_top = 1
self._right_end_bottom = -1
if project is None:
if record.id == "" or record.id == ".":
project = frecord.split("/")[-1].split(".")
project = project[0] if len(project) == 1 else ".".join(project[:-1])
else:
project = record.id
self._unique_id = project
#import features
self._dnafeatures = []
if len(record.features) > 0:
for feat in record.features:
self._dnafeatures.append(DNAfeature(feature=feat, subject=self))
pairs = []
history_feature = None
history_nums = [QUEEN._num_history]
for feat in self.dnafeatures:
if feat.type == "source" and feat.start == 0 and feat.end == len(self.seq):
for key in feat.qualifiers:
if "building_history" in key[0:18] and import_history == True:
history = feat.qualifiers[key][0]
feat.qualifiers[key][0] = feat.qualifiers[key][0].replace(" ","")
feat.qualifiers[key][2] = feat.qualifiers[key][2].replace(" ","")
results = re.findall("QUEEN.dna_dict\['[^\[\]]+'\]", history)
for result in results:
_unique_id = result.split("['")[1][:-2]
QUEEN.dna_dict[_unique_id] = None
history_num = int(key.split("_")[-1])
pairs.append((feat, history_num, feat.qualifiers[key]))
history_feature_id = "0"
history_feature = copy.deepcopy(feat)
else:
for key in feat.qualifiers:
if key == "broken_feature":
feat.qualifiers["broken_feature"][0] = feat.qualifiers["broken_feature"][0].replace(" ","")
note = feat.qualifiers["broken_feature"][0]
original = note.split(":")[-3]
feat._original = original
if len(pairs) == 0:
import_history = False
if import_history == True:
for pair in pairs:
del history_feature.qualifiers["building_history_{}".format(pair[1])]
for pair in pairs:
feat = pair[0]
new_history_num = pair[1] + QUEEN._num_history
history_feature.qualifiers["building_history_{}".format(new_history_num)] = pair[2]
history_nums.append(new_history_num)
else:
#QUEEN.process_description = process_description
deletehistory(self)
QUEEN._num_history = max(history_nums)
if history_feature is not None:
self._history_feature = history_feature
self._dnafeatures.remove(pairs[0][0])
if len(self.dnafeatures) == 0:
import_history = False
self._dnafeatures = []
elif record is None:
import_history = False
if project is None:
project = "dna"
seq = seq.upper()
sticky = False
pattern1 = "[ATGCRYKMSWBDHVNatgcrykmsbdhvn*-]+/?[ATGCRYKMSWBDHVNatgcrykmsbdhvn*-]*"
pattern2 = "[ATGCRYKMSWBDHVNatgcrykmsbdhvn]+-+[ATGCRYKMSWBDHVNatgcrykmbdhvn]+"
pattern1 = re.compile(pattern1)
pattern2 = re.compile(pattern2)
if pattern1.fullmatch(seq) != None or seq == "":
if "/" in seq:
top, bottom, seq = QUEEN._check_seq(seq)
if "-" in top or "-" in bottom:
if top !=False and pattern2.search(top) is None and pattern2.search(bottom) is None:
sticky = True
else:
raise TypeError("An invalid nucleotide sequence pattern was found.")
nucl_set_top = list(set(list(top)))
nucl_set_bottom = list(set(list(bottom)))
if (len(nucl_set_top) == 1 and nucl_set_top[0] == "-"):
self._ssdna = True
seq = bottom[::-1]
top = bottom[::-1]
bottom = "-" * len(bottom)
elif (len(nucl_set_bottom) == 1 and nucl_set_bottom[0] == "-"):
self._ssdna = True
else:
pass
else:
sticky = False
else:
if ssdna == True:
top = seq
bottom = "-" * len(seq)
sticky = True
else:
pass
self._seq = str(seq).upper()
if "_" in project:
project = project.replace("_","-")
self._unique_id = project
if Alphabet:
self.record = SeqRecord(Seq(str(seq),Alphabet.DNAAlphabet()))
else:
self.record = SeqRecord(Seq(str(seq)))
self._dnafeatures = []
self._topology = topology
if sticky == True:
self._topology = "linear"
self._left_end = ""
self._right_end = ""
if top[0] == "-":
for c, char in enumerate(top):
if char != "-":
break
else:
self._left_end += bottom[c].translate(str.maketrans("ATGCRYKMSWBDHV","TACGYRMKWSVHDB"))
self._left_end_top = -1
self._left_end_bottom = 1
else:
if bottom[0] == "-":
for c, char in enumerate(bottom):
if char != "-":
break
else:
self._left_end += top[c]
self._left_end_bottom = -1
self._left_end_top = 1
else:
self._left_end = ""
self._left_end_bottom = 1
self._left_end_top = 1
if top[-1] == "-":
for c, char in enumerate(top[::-1]):
if char != "-":
break
else:
self._right_end += bottom[::-1][c].translate(str.maketrans("ATGCRYKMSWBDHV","TACGYRMKWSVHDB"))
self._right_end = self._right_end[::-1]
self._right_end_top = -1
self._right_end_bottom = 1
else:
if bottom[-1] == "-":
for c, char in enumerate(bottom[::-1]):
if char != "-":
break
else:
self._right_end += top[::-1][c]
self._right_end = self._right_end[::-1]
self._right_end_bottom = -1
self._right_end_top = 1
else:
self._right_end = ""
self._right_end_bottom = 1
self._right_end_top = 1
else:
self._right_end = ""
self._left_end = ""
self._left_end_top = 1
self._left_end_bottom = 1
self._right_end_top = 1
self._right_end_bottom = 1
else:
raise TypeError("An invalid nucleotide sequence pattern was found.")
self._setfeatureids()
if type(setfeature) in (tuple, list) and type(setfeature[0]) == dict:
for feature_dict in setfeature:
self.setfeature(feature_dict)
elif type(setfeature) == dict:
self.setfeature(setfeature)
if _direct == 1 and quinable == True:
self._product_id = self._unique_id if product is None else product
if import_history == False:
fseq = "" if fseq is None else "seq='{}'".format(fseq)
frecord = "" if frecord is None else "record='{}'".format(frecord) if fseq == "" else ", record='{}'".format(frecord)
fdbtype = "" if fdbtype == "local" else ", dbtype='{}'".format(fdbtype)
fproject = "" if fproject is None else ", project='{}'".format(fproject)
fssdna = "" if ssdna == False else ", ssdna='{}'".format(ssdna)
ftopology = "" if topology == "linear" else ", topology='{}'".format(topology)
fproduct = "" if fproduct is None else ", product='{}'".format(fproduct)
fileformat = "" if fileformat is None else ", fileformat='{}'".format(fileformat)
fsetfeature = "" if setfeature == False else ", setfeature={}".format(str(setfeature))
process_name = "" if process_name is None else ", process_name='" + process_name + "'"
process_description = "" if process_description is None else ", process_description='" + process_description + "'"
args = [fseq, frecord, fdbtype, fproject, fssdna, ftopology, fileformat, fsetfeature, fproduct, process_name, process_description]
building_history = "QUEEN.dna_dict['{}'] = QUEEN({}{}{}{}{}{}{}{}{}{})".format(self._product_id, *args)
process_id, original_ids = make_processid(self, building_history, process_id, original_ids)
QUEEN._num_history += 1
feat = SeqFeature(FeatureLocation(0, len(self.seq), strand=1), type="source")
feat._id = "0"
feat.qualifiers["label"] = [self.project]
feat.qualifiers["description"] = ["Record of building history"]
feat.qualifiers["building_history_{}".format(QUEEN._num_history)] = [building_history.replace(" ","–"), "", ",".join([process_id] + original_ids)]
feat = DNAfeature(feature=feat, subject=self)
self._history_feature = feat
else:
nums = []
for key in self._history_feature.qualifiers:
if "building_history" in key[0:18]:
nums.append(int(key.split("_")[-1]))
nums.sort()
for key in self._history_feature.qualifiers:
if "building_history" in key[0:18]:
num = int(key.split("_")[-1])
if num != nums[-1]:
if self._history_feature.qualifiers[key][1] == "":
self._history_feature.qualifiers[key][1] = "_source: " + self.project + " construction"
else:
self._history_feature.qualifiers[key][1] += "; _source: " + self.project + " construction"
else:
if self._history_feature.qualifiers[key][1] == "":
self._history_feature.qualifiers[key][1] = "_source: " + self.project + " construction" + "; _load: " + self.project
else:
self._history_feature.qualifiers[key][1] += "; _source: " + self.project + " construction" + "; _load: " + self.project
if QUEEN._keep == 1 and _direct == 1 and quinable == True:
QUEEN._products[self._product_id] = self
if _direct == 1 and quinable == True and import_history == True:
self._load_history = 0
else:
self._load_history = -1
self._positions = tuple(range(len(self.seq)))
self.record.features = self.dnafeatures
self.seq.parental_id = self._unique_id
if product is None:
pass
else:
QUEEN._namespace[product] = self
def searchsequence(self, query, start=0, end=None, strand=2, unique=False, product=None, process_name=None, process_description=None, pn=None, pd=None, process_id=None, original_ids=[], _sourcefile=None, quinable=True, _direct=1):
"""Search for specific sequences from a QUEEN object.
Search for specific sequences from a user-defined region of a `QUEEN_object` and
return a list of `DNAfeature_objects`. Start and end attributes of returned
`DNAfeature_objects` represent the sequence regions of the `QUEEN_object` that
matched the user's query. Note that the returned `DNAfeature_objects` will not be
assigned a `.feature_id` and will not be reflected in the parental `QUEEN_object`.
The returned `DNAfeature_objects` can be added to `QUEEN_object.dnafeatures` by
`editfeature()` with the `createattribute` option as explained below.
Parameters
----------
query : regex or str, default: ".+"
Search query sequence. If the value is not provided, the user-specified search
region of the `QUEEN_object` sequence with `start` and `end` explained below will
be returned. It allows fuzzy matching and regular expression. For details,
see https://pypi.org/project/regex/.
All IUPAC nucleotide symbols can be used. Restriction enzyme cut motif
representation can be used to define a query with `"^"` and `"_"` or
`"(int/int)"`. For example, EcoRI cut motif can be provided by `"G^AATT_C"`,
where `"^"` and `"_"` represent the cut positions on the top and bottom strands,
respectively, or by `"GAATTC(-5/-1)"` or `"(-5/-1)GAATTC"`, where the left and
right integers between the parentheses represent the cut positions on the top and
bottom strands, respectively. Similarly, the cut motif of a Type-IIS restriction
enzyme BsaI can be given by `"GGTCTCN^NNN_N"`, `"N^NNN_NGAGACC"`, `"GGTCTC(1/5)"`
or `"(5/1)GAGACC"`. The returned `DNAfeature_objects` obtained for a query
restriction enzyme cut motif will hold the cutting rule in the
`"qualifiers:cutsite"` attribute, which can be added to `QUEEN_object.dnafeatures`
by `editfeature()` with the `createattribute` option as explained below.
Regular expression is disabled for restriction enzyme cut motifs.
start : int (zero-based indexing), default: 0
Start position of the target range of the `QUEEN_object` sequence for the search.
end : int (zero-based indexing; default: the last sequence position of `QUEEN_object`)
End position of the target range of the `QUEEN_object` sequence for the search.
strand : int: 1 (top strand only), -1 (bottom strand only), or 2 (both strands), default: 2
Sequence strand to be searched.
unique : bool (True or False), default: False
If the value is `True` and multiple (more than one) sequence regions are detected
by the search, an error will be raised. If False, multiple sequence detections
are acceptable.
Returns
-------
list of QUEEN.qobj.DNAfeature object
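Examples
--------
A minimal sketch; `plasmid` is a hypothetical QUEEN_object:
ecori_sites = plasmid.searchsequence(query="G^AATT_C")        #EcoRI cut motif
bsai_sites  = plasmid.searchsequence(query="GGTCTC(1/5)")     #Type-IIS (BsaI) cut motif
gg_sites    = plasmid.searchsequence(query="GGWCC", strand=1) #IUPAC query, top strand only
Cut-motif queries store the cutting rule of each match in qualifiers["cutsite"].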
"""
process_name = pn if process_name is None else process_name
process_description = pd if process_description is None else process_description
history_features = [self._history_feature]
if "__dict__" in dir(query) and "cutsite" in query.__dict__:
query = query.cutsite
qorigin = query
start = 0 if start == len(self.seq) else start
end = len(self.seq) if end is None else end
strand = 2 if strand is None else strand
if start == 0 and end == len(self.seq):
if self.topology == "circular":
subject = self.seq + self.seq
else:
subject = self.seq
else:
subject = self.printsequence(start, end, strand=1)
feat_list = []
if query is None:
if start > end:
locations = [(start, len(self.seq), strand), (0, end, strand)]
if strand == -1:
locations.reverse()
new_feat = SeqFeature(CompoundLocation(list(map(FeatureLocation, locations))), type="misc_feature")
else:
new_feat = SeqFeature(FeatureLocation(start, end, strand=1), type="misc_feature")
new_feat = DNAfeature(feature=new_feat, subject=self, query=subject)
feat_list.append(new_feat)
else:
cut = 0
if set(str(query)) <= set("ATGCRYKMSWBDHVN^_/()-0123456789") and len(set(str(query)) & set("^_/()-0123456789")) > 0 and query.count("^") <= 2 and query.count("_") <= 2 and query.count("^") == query.count("_"):
cut = 1
cutsite, query, topl, topr, bottoml, bottomr = compile_cutsite(query)
else:
pass
if len(set(str(query)) & set("RYKMSWBDHVN()0123456789")) > 0:
query = query.replace("R","[RGA]")
query = query.replace("Y","[YTC]")
query = query.replace("K","[KGT]")
query = query.replace("M","[MAC]")
query = query.replace("S","[SGC]")
query = query.replace("W","[WAT]")
query = query.replace("B","[BGTC]")
query = query.replace("D","[DGAT]")
query = query.replace("H","[HACT]")
query = query.replace("V","[VGCA]")
query = query.replace("N","[NAGCT]")
if strand == 2:
match_list = get_matchlist_regex(self, query, value=None, subject=subject, s=start, e=end, strand=1)
match_list.extend(get_matchlist_regex(self, query, value=None, subject=subject.translate(str.maketrans("ATGCRYKMSWBDHV","TACGYRMKWSVHDB"))[::-1], s=start, e=end, strand=-1))
elif strand == 1:
match_list = get_matchlist_regex(self, query, value=None, subject=subject, s=start, e=end, strand=strand)
elif strand == -1:
match_list = get_matchlist_regex(self, query, value=None, subject=subject.translate(str.maketrans("ATGCRYKMSWBDHV","TACGYRMKWSVHDB"))[::-1], s=start, e=end, strand=strand)
else:
ValueError("When edit the sequence, the sequence strand to be edit should be '-1' or '+1.'")
match_positions = set()
for match in match_list:
span = (match["start"], match["end"])
if span not in match_positions:
match_positions.add(span)
if match["start"] > match["end"] and self.topology == "circular":
locations = [[match["start"], len(self.seq), match["strand"]], [0, match["end"], match["strand"]]]
if match["strand"] == -1:
locations.reverse()
new_feat = SeqFeature(CompoundLocation(list(map(lambda x:FeatureLocation(*x), locations))), type="misc_feature")
else:
new_feat = SeqFeature(CompoundLocation(list(map(lambda x:FeatureLocation(*x), locations))), type="misc_feature")
else:
new_feat = SeqFeature(FeatureLocation(match["start"], match["end"], strand=match["strand"]), type="misc_feature")
new_feat = DNAfeature(feature=new_feat, subject=self, query=qorigin.replace("^","").replace("_",""))
if cut == 1:
new_feat._digestion_topl = topl
new_feat._digestion_topr = topr
new_feat._digestion_bottoml = bottoml
new_feat._digestion_bottomr = bottomr
new_feat.qualifiers["cutsite"] = [Qseq(cutsite)]
if type(qorigin) == Qseq and qorigin.parental_class == "Cutsite":
new_feat.qualifiers["label"] = [qorigin.parent.name]
feat_list.append(new_feat)
else:
pass
qkey = self._unique_id + "_f" + str(QUEEN._qnum)
for i, feature in enumerate(feat_list):
if "label" in feature.qualifiers:
label = "_" + feature.qualifiers["label"][0]
else:
label = ""
if "_id" in feature.__dict__:
key = qkey + "_" + feature._id + label
else:
key = qkey + "_" + "q" + str(i) + label
feature._qkey = qkey
feature._second_id = key
feature._start.qkey = qkey
feature._start.parental_id = key
feature._end.qkey = qkey
feature._end.parental_id = key
QUEEN.queried_features_dict[qkey] = feat_list
QUEEN.queried_features_name_dict[qkey] = product
if _direct == 1 and quinable == True:
if type(qorigin) == Qseq:
if qorigin.parental_class == "DNAfeature":
history_features.append(qorigin.parent.subject._history_feature)
qkey = qorigin.qkey
for qindex, qfeat in enumerate(QUEEN.queried_features_dict[qkey]):
if qfeat._second_id == qorigin.parental_id:
break
if type(qorigin.item) == int:
qorigin = "QUEEN.queried_features_dict['{}'][{}].{}[{}]".format(qkey, qindex, "seq" , qorigin.item)
elif type(qorigin.item) == slice:
sl_start = qorigin.item.start
sl_stop = qorigin.item.stop
sl_step = qorigin.item.step
sl_start = "" if sl_start is None else sl_start
sl_stop = "" if sl_stop is None else sl_stop
if sl_step == 1 or sl_step == None:
qorigin = "QUEEN.queried_features_dict['{}'][{}].seq[{}:{}]".format(qkey, qindex, sl_start, sl_stop)
else:
qorigin = "QUEEN.queried_features_dict['{}'][{}].seq[{}:{}:{}]".format(qkey, qindex, sl_start, sl_stop, sl_step)
else:
qorigin = "QUEEN.queried_features_dict['{}'][{}].seq".format(qkey, qindex)
elif qorigin.parental_class == "QUEEN":
history_features.append(qorigin.parent._history_feature)
parental_id = qorigin.parental_id
if qorigin.name != None:
if "printsequence" in qorigin.name:
if len(qorigin.name.split("_")) == 2:
seqname = "QUEEN.dna_dict['{}'].printsequence(strand={})".format(parental_id, qorigin.name.split("_")[-1])
else:
seqname = "QUEEN.dna_dict['{}'].printsequence(start={}, end={}, strand={})".format(parental_id, *qorigin.name.split("_")[1:])
if qorigin.name == "rcseq":
seqname = "QUEEN.dna_dict['{}'].rcseq".format(parental_id)
else:
seqname = "QUEEN.dna_dict['{}'].seq".format(parental_id)
if type(qorigin.item) == int:
args.append("{}[{}]".format(seqname, qorigin.item))
elif type(qorigin.item) == slice:
sl_start = qorigin.item.start
sl_stop = qorigin.item.stop
sl_step = qorigin.item.step
sl_start = "" if sl_start is None else sl_start
sl_stop = "" if sl_stop is None else sl_stop
if sl_step == 1 or sl_step == None:
qorigin = "{}[{}:{}]".format(seqname, sl_start, sl_stop)
else:
qorigin = "{}[{}:{}:{}]".format(seqname, sl_start, sl_stop, sl_step)
else:
qorigin = "{}".format(seqname)
elif qorigin.parental_class == "Cutsite":
if qorigin.parent.name not in cs.defaultkeys:
cs.new_cutsites.add((qorigin.parent.name, qorigin.parent.cutsite))
if qorigin.name == "cutsite":
qorigin = "cs.lib['{}'].{}".format(qorigin.parent.name, qorigin.name)
else:
qorigin = "cs.lib['{}']".format(qorigin.parent.name)
else:
qorigin = "{}".format(repr(qorigin))
else:
qorigin = "{}".format(repr(qorigin))
if len(history_features) > 1:
history_feature = _combine_history(self, history_features)
self._history_feature = history_feature
funique = "" if unique == False else ", unique=True"
fproduct = "" if product is None else ", product='{}'".format(product)
process_name = "" if process_name is None else ", process_name='" + process_name + "'"
process_description = "" if process_description is None else ", process_description='{}'".format(process_description)
if start == 0 and end == len(self.seq):
if strand == 2:
building_history = "QUEEN.queried_features_dict['{}'] = QUEEN.dna_dict['{}'].searchsequence(query={}{}{}{}{})".format(qkey, self._product_id, qorigin, funique, fproduct, process_name, process_description)
process_id, original_ids = make_processid(self, building_history, process_id, original_ids)
add_history(self, [building_history, "query: {}".format(qorigin), ",".join([process_id] + original_ids)], _sourcefile=_sourcefile)
else:
building_history = "QUEEN.queried_features_dict['{}'] = QUEEN.dna_dict['{}'].searchsequence(query={}, strand={}{}{}{}{})".format(qkey, self._product_id, qorigin, strand, funique, fproduct, process_name, process_description)
process_id, original_ids = make_processid(self, building_history, process_id, original_ids)
add_history(self, [building_history, "query: {}; strand: {}".format(qorigin, strand), ",".join([process_id] + original_ids)], _sourcefile=_sourcefile)
else:
building_history = "QUEEN.queried_features_dict['{}'] = QUEEN.dna_dict['{}'].searchsequence(query={}, start={}, end={}, strand={}{}{}{}{})".format(qkey, self._product_id, qorigin, start, end, strand, funique, fproduct, process_name, process_description)
process_id, original_ids = make_processid(self, building_history, process_id, original_ids)
add_history(self, [building_history, "query: {}; start: {}; end: {}; strand: {}".format(qorigin, start, end, strand), ",".join([process_id] + original_ids)], _sourcefile=_sourcefile)
QUEEN._qnum += 1
if product is None:
pass
else:
QUEEN._namespace[product] = feat_list
if unique == True and len(feat_list) > 1:
raise ValueError("Mutiple sequence regions were detected. If `unique` is True, the search results should be unique.")
return feat_list
def searchfeature(self, key_attribute="all", query=".+", source=None, start=0, end=None, strand=2, product=None, process_name=None, process_description=None,
pn=None, pd=None, process_id=None, original_ids=[], _sourcefile=None, quinable=True, _direct=1):
"""Search for `DNAfeature_objects` holding a queried value in a designated `key_attribute`.
Parameters
----------
key_attribute : str, default: "all"
Attribute type to be searched (`feature_id`, `feature_type`, `"qualifiers:*"`,
or `sequence`). If the value is not provided, it will be applied to all of the
attributes in the `QUEEN_object`, excluding `sequence`. However, if the `query`
value is provided with only the four nucleotide letters (A, T, G, and C), this
value will be automatically set to `sequence`.
query : regex or str, default: ".+"
Query term. `DNAfeature_objects` that have a value matches to this query for
`key_attribute` designated above will be returned. It allows fuzzy matching and
regular expression. For details, see https://pypi.org/project/regex/. If the
`key_attribute` is `sequence`, all IUPAC nucleotide symbols can be used.
source : list of `DNAfeature_objects`, default: `QUEEN_object.dnafeatures`
Source `DNAfeature_objects` to be searched. `DNAfeature_objects` outside the
search range defined by `start`, `end`, and `strand` will be removed from the
source. Any `DNAfeature_objects` can be provided here. For example, a list of
`DNAfeature_objects` returned from another `searchsequence()` or `searchfeature()`
operation can be used as the source to achieve an AND search with multiple queries.
start : int (zero-based indexing), default: 0
Start position of the target range of the `QUEEN_object` sequence for the search.
end : int (zero-based indexing), default: the last sequence position of `QUEEN_object`
End position of the target range of the `QUEEN_object` sequence for the search.
strand : int 1 (top strand), -1 (bottom strand), or 2 (both strands), default: 2
Sequence strand to be searched.
process_name : str
For details, see `QUEEN.queen.qobj.__doc__`.
process_description : str
For details, see `QUEEN.queen.qobj.__doc__`.
process_id : str
For details, see `QUEEN.queen.qobj.__doc__`.
quinable : bool
For details, see `QUEEN.queen.qobj.__doc__`.
Returns
-------
list of QUEEN.qobj.DNAfeature object
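Examples
--------
A minimal sketch; `plasmid` and the queried values are hypothetical:
cds_feats = plasmid.searchfeature(key_attribute="feature_type", query="CDS")
amp_feats = plasmid.searchfeature(key_attribute="qualifiers:label", query="AmpR", source=cds_feats)
Passing the result of one search as `source` to the next realizes an AND search
with multiple queries, as described above.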
"""
process_name = pn if process_name is None else process_name
process_description = pd if process_description is None else process_description
history_features = [self._history_feature]
start = 0 if start == len(self.seq) else start
end = len(self.seq) if end is None else end
qkey = self._unique_id + "_f" + str(QUEEN._qnum)
features = editfeature(self, key_attribute=key_attribute, query=query, source=source, start=start, end=end, strand=strand, target_attribute=None, operation=None, __direct=0, process_description=process_description)
feature_names = []
for i, feature in enumerate(features):
if "label" in feature.qualifiers:
label = "_" + feature.qualifiers["label"][0]
else:
label = ""
if "_id" in feature.__dict__:
key = qkey + "_" + feature._id + label
else:
key = qkey + "_" + "q" + str(i) + label
feature._qkey = qkey
feature._second_id = key
feature._start.qkey = qkey
feature._start.parental_id = key
feature._end.qkey = qkey
feature._end.parental_id = key
feature_names = ", ".join(feature_names)
QUEEN.queried_features_dict[qkey] = features
QUEEN.queried_features_name_dict[qkey] = product
if _direct == 1:
if type(query) == Qseq:
if query.parental_class == "DNAfeature":
history_features.append(query.parent.subject._history_feature)
qkey = query.qkey
for qindex, qfeat in enumerate(QUEEN.queried_features_dict[qkey]):
if qfeat._second_id == query.parental_id:
break
if type(query.item) == int:
query = "QUEEN.queried_features_dict['{}'][{}].{}[{}]".format(qkey, qindex, "seq" , query.item)
elif type(query.item) == slice:
sl_start = query.item.start
sl_stop = query.item.stop
sl_step = query.item.step
sl_start = "" if sl_start is None else sl_start
sl_stop = "" if sl_stop is None else sl_stop
if sl_step == 1 or sl_step == None:
query = "QUEEN.queried_features_dict['{}'][{}].seq[{}:{}]".format(qkey, qindex, sl_start, sl_stop)
else:
query = "QUEEN.queried_features_dict['{}'][{}].seq[{}:{}:{}]".format(qkey, qindex, sl_start, sl_stop, sl_step)
else:
query = "QUEEN.queried_features_dict['{}'][{}].seq".format(qkey, qindex)
elif query.parental_class == "QUEEN":
history_features.append(query.parent._history_feature)
parental_id = query.parental_id
if query.name != None:
if "printsequence" in query.name:
if len(query.name.split("_")) == 2:
seqname = "QUEEN.dna_dict['{}'].printsequence(strand={})".format(parental_id, query.name.split("_")[-1])
else:
seqname = "QUEEN.dna_dict['{}'].printsequence(start={}, end={}, strand={})".format(parental_id, *query.name.split("_")[1:])
if query.name == "rcseq":
seqname = "QUEEN.dna_dict['{}'].rcseq".format(parental_id)
else:
seqname = "QUEEN.dna_dict['{}'].seq".format(parental_id)
if type(query.item) == int:
args.append("{}[{}]".format(seqname, query.item))
elif type(query.item) == slice:
sl_start = query.item.start
sl_stop = query.item.stop
sl_step = query.item.step
sl_start = "" if sl_start is None else sl_start
sl_stop = "" if sl_stop is None else sl_stop
if sl_step == 1 or sl_step == None:
query = "{}[{}:{}]".format(seqname, sl_start, sl_stop)
else:
query = "{}[{}:{}:{}]".format(seqname, sl_start, sl_stop, sl_step)
else:
query = "{}".format(seqname)
elif query.parental_class == "Cutsite":
if query.parent.name not in cs.defaultkeys:
cs.new_cutsites.add((query.parent.name, query.parent.cutsite))
query = "cs.lib['{}'].{}".format(query.parent.name, query.name)
else:
query = "{}".format(repr(query))
else:
query = "{}".format(repr(query))
if source is not None:
qkeys = set([])
for feat in source:
if "_qkey" in feat.__dict__:
qkeys.add(feat._qkey)
if len(set(qkeys)) == 1:
source = "QUEEN.queried_features_dict['{}']".format(list(qkeys)[0])
else:
pass
fproduct = "" if product is None else ", product='" + product + "'"
process_name = "" if process_name is None else ", process_name='" + process_name + "'"
process_description = "" if process_description is None else ", process_description='" + process_description + "'"
if len(history_features) > 1:
history_feature = _combine_history(self, history_features)
self._history_feature = history_feature
if start == 0 and end == len(self.seq):
if strand == 2 and source is None:
building_history = "QUEEN.queried_features_dict['{}'] = QUEEN.dna_dict['{}'].searchfeature(key_attribute='{}', query={}{}{}{})".format(qkey, self._product_id, key_attribute, query, fproduct, process_name, process_description)
process_id, original_ids = make_processid(self, building_history, process_id, original_ids)
add_history(self, [building_history, "key_attribute: {}; query: {}".format(key_attribute, query), ",".join([process_id] + original_ids)], _sourcefile=_sourcefile)
elif strand == 2:
building_history = "QUEEN.queried_features_dict['{}'] = QUEEN.dna_dict['{}'].searchfeature(key_attribute='{}', query={}, source={}{}{}{})".format(qkey, self._product_id, key_attribute, query, source, fproduct, process_name, process_description)
process_id, original_ids = make_processid(self, building_history, process_id, original_ids)
add_history(self, [building_history, "key_attribute: {}; query: {}; soruce: {}".format(key_attribute, query, source), ",".join([process_id] + original_ids)], _sourcefile=_sourcefile)
else:
building_history = "QUEEN.queried_features_dict['{}'] = QUEEN.dna_dict['{}'].searchfeature(key_attribute='{}', query={}, source={}, strand={}{}{}{})".format(qkey, self._product_id, key_attribute, query, source, strand, fproduct, process_name, process_description)
process_id, original_ids = make_processid(self, building_history, process_id, original_ids)
add_history(self, [building_history, "key_attribute: {}; query: {}; soruce: {}; strand: {}".format(key_attribute, query, source, strand), ",".join([process_id] + original_ids)], _sourcefile=_sourcefile)
else:
building_history = "QUEEN.queried_features_dict['{}'] = QUEEN.dna_dict['{}'].searchfeature(key_attribute='{}', query={}, source={}, start={}, end={}, strand={}{}{}{})".format(qkey, self._product_id, key_attribute, query, source, start, end, strand, fproduct, process_name, process_description)
process_id, original_ids = make_processid(self, building_history, process_id, original_ids)
add_history(self, [building_history, "key_attribute: {}; query: {}; soruce: {}; start: {}; end: {}; strand: {}".format(key_attribute, query, source, start, end, strand), ",".join([process_id] + original_ids)], _sourcefile=_sourcefile)
QUEEN._qnum += 1
if product is None:
pass
else:
QUEEN._namespace[product] = features
return features
def __getitem__(self, item):
if type(item) == slice:
if item.step is None:
strand = 1
elif type(item.step) != int:
raise TypeError("slice indices must be integers or None or have an __index__ method.")
else:
strand = item.step
if item.start is None:
start = 0
else:
if type(item.start) == int:
start = item.start
else:
raise TypeError("slice indices must be integers or None or have an __index__ method.")
if item.stop is None:
end = len(self.seq)
else:
if type(item.stop) == int:
end = item.stop
else:
raise TypeError("slice indices must be integers or None or have an __index__ method.")
if type(start) == int and type(end) == int:
if start < 0:
start = len(self.seq) - abs(start)
if end < 0:
end = len(self.seq) - abs(end)
subdna = cropdna(self, start, end, __direct=0)
else:
raise TypeError("slice indices must be integers or None or have an __index__ method")
if strand == -1 or strand < 0:
return flipdna(subdna, __direct=0)
else:
return subdna
else:
raise ValueError("Invalid index type was specified.")
def __add__(self, other):
if (type(other) == str and set(other) <= set("ATGCRYKMSWBDHVNatgcrykmswbdhvn-")) or type(other) == Seq:
other = QUEEN(seq=other)
elif type(other) == SeqRecord:
other = QUEEN(record=other)
elif type(other) == QUEEN:
pass
if other.topology == "circular" or self.topology == "circular":
raise ValueError("Cicularized QUEEN object cannot be joined with others.")
else:
return joindna(self, other, __direct=0)
def __radd__(self, other):
if (type(other) == str and set(other) <= set("ATGCRYKMSWBDHVNatgcrykmswbdhvn-")) or type(other) == Seq:
other = QUEEN(seq=other)
elif type(other) == SeqRecord:
other = QUEEN(record=other)
elif type(other) == QUEEN:
pass
if other.topology == "circular" or self.topology == "circular":
raise ValueError("Cicularized QUEEN object cannot be joined with others.")
else:
return joindna(other, self, __direct=0)
def printsequence(self, start=None, end=None, strand=2, hide_middle=None, linebreak=None, display=False):
"""Returns and displays partial or the entire dsDNA sequence and sequence end structures.
Parameters
----------
start : int (zero-based indexing), default: 0
Start position of the sequence.
end : int (zero-based indexing), default: the last sequence position of `QUEEN_object`
End position of the sequence.
strand : int: 1 (top strand only), -1 (bottom strand only), or 2 (both strands), default: 2
Sequence strand(s) to be returned.
display : bool (True or False), default: False
If `True`, the output will be displayed in `STDOUT`.
hide_middle: int or None, default: None
Length of both end sequences to be displayed.
linebreak: int (default: length of the `QUEEN_object` sequence)
Length of sequence for linebreak.
Returns
-------
If `strand` == `1` or `-1`,
sequence of the defined strand (5’→3’)
If `strand` == `2`,
"str/str" (top strand sequence (5’→3’)/bottom strand sequence (3’→5’))
"""
if self._ssdna == True:
if display == True:
print("5' {} 3'".format(self.seq))
return self.seq
whole = False
if linebreak is None:
width = len(self.seq) + 1
else:
width = linebreak
if hide_middle is None or hide_middle > 0.5 * len(self.seq):
hide_middle = int(0.5 * len(self.seq))
whole = True
tl = len(self._left_end) if self._left_end_top == -1 else 0
tr = len(self._right_end) if self._right_end_top == -1 else 0
truetop = "-" * tl + self.seq[tl:(-1*tr if tr != 0 else None)] + "-" * tr
bl = len(self._left_end) if self._left_end_bottom == -1 else 0
br = len(self._right_end) if self._right_end_bottom == -1 else 0
truebottom = "-" * bl + self.rcseq[::-1][bl:(-1*br if br != 0 else None)] + "-" * br
if start is None and end is None and strand == 2:
rcseq = self.seq.translate(str.maketrans("ATGCRYKMSWBDHV","TACGYRMKWSVHDB"))[::-1]
if len(self._left_end) > hide_middle:
left_length = hide_middle
else:
left_length = len(self._left_end)
if self._left_end_top == 1:
left_end_top = self.seq[:hide_middle]
else:
left_end_top = "-" * left_length + self.seq[left_length:hide_middle]
if self._left_end_bottom == 1:
left_end_bottom = self.seq[:hide_middle].translate(str.maketrans("ATGCRYKMSWBDHV","TACGYRMKWSVHDB"))
else:
left_end_bottom = "-" * left_length + self.seq[left_length:hide_middle].translate(str.maketrans("ATGCRYKMSWBDHV","TACGYRMKWSVHDB"))
if len(self._right_end) > hide_middle:
right_length = hide_middle
else:
right_length = len(self._right_end)
if self._right_end_top == 1:
right_end_top = self.seq[len(self.seq)-hide_middle:]
else:
right_end_top = self.seq[len(self.seq)-hide_middle:len(self.seq)-right_length] + "-" * right_length
if self._right_end_bottom == 1:
right_end_bottom = self.seq[len(self.seq)-hide_middle:].translate(str.maketrans("ATGCRYKMSWBDHV","TACGYRMKWSVHDB"))
else:
right_end_bottom = self.seq[len(self.seq)-hide_middle:len(self.seq)-right_length].translate(str.maketrans("ATGCRYKMSWBDHV","TACGYRMKWSVHDB")) + "-" * right_length
top = left_end_top + self.seq[hide_middle:len(self.seq)-hide_middle] + right_end_top
bottom = left_end_bottom + self.seq[hide_middle:len(self.seq)-hide_middle].translate(str.maketrans("ATGCRYKMSWBDHV","TACGYRMKWSVHDB")) + right_end_bottom
else:
if type(start) is DNAfeature:
feature = start
start = feature.start
end = feature.end
strand = feature.strand
if start is None:
start = 0
if end is None:
end = len(self.seq)
if strand is None:
strand = 2
if start >= end:
if self.topology == "circular":
top = self.seq[start:] + self.seq[:end]
else:
return ""
else:
top = self.seq[start:end]
bottom = top.translate(str.maketrans("ATGCRYKMSWBDHV","TACGYRMKWSVHDB"))
out = ""
if display == True:
if whole == False:
if display == True:
print("5' {}...{} 3'".format(left_end_top, right_end_top))
print("3' {}...{} 5'".format(left_end_bottom, right_end_bottom))
print()
out += "5' {}...{} 3'\n".format(left_end_top, right_end_top)
out += "3' {}...{} 5'".format(left_end_bottom, right_end_bottom)
else:
if len(top) < width:
if display == True:
print("5' {} 3'".format(top))
print("3' {} 5'".format(bottom))
print()
out += "5' {} 3'\n".format(top)
out += "3' {} 5'\n".format(bottom)
else:
for i in range(0, len(top), width):
if display == True:
print("5' {} 3'".format(top[i:i+width]))
print("3' {} 5'".format(bottom[i:i+width]))
print()
out += "5' {} 3'\n".format(top[i:i+width])
out += "3' {} 5'\n".format(bottom[i:i+width])
out += "\n"
out = out.rstrip()
if start is None and end is None:
if strand == 1 or strand == 0:
return_seq = truetop
elif strand == -1:
return_seq = truebottom[::-1]
elif strand == 2:
return_seq = truetop + "/" + truebottom
else:
if strand == 1 or strand == 0:
return_seq = top
elif strand == -1:
return_seq = bottom[::-1]
elif strand == 2:
return_seq = top + "/" + bottom
return_seq = Qseq(return_seq)
return_seq.__dict__ = self.seq.__dict__
if (start == 0 or start is None) and (end == len(self.seq) or end is None):
return_seq.name = "printsequence_{}".format(strand)
else:
return_seq.name = "printsequence_{}_{}_{}".format(start, end, strand)
return return_seq
def _setfeatureids(self):
keys = [feat._id for feat in self.dnafeatures if "_id" in feat.__dict__]
keys = tuple(keys)
for i in range(0, len(self.dnafeatures)):
if "_id" in self._dnafeatures[i].__dict__ and (self._dnafeatures[i]._id.isdecimal()==False and keys.count(self._dnafeatures[i]._id)>0):
pass
else:
self._dnafeatures[i]._id = str(i*100)
def _check_uniqueness(self):
keys_list = [feat._id for feat in self.dnafeatures if "_id" in feat.__dict__]
keys_list = tuple(keys_list)
keys_set = set(keys_list)
if len(keys_list) == len(keys_set):
pass
else:
for i in range(0, len(self.dnafeatures)):
if "_id" in self._dnafeatures[i].__dict__ and keys_list.count(self._dnafeatures[i]._id)==1:
pass
else:
j = 0
while str(i*100 + j) in keys_set:
j += 1
self._dnafeatures[i]._id = str(i*100 + j)
def getdnafeatures(self,feature_id):
return self.features_dict[str(feature_id)]
def setfeature(self, attribute_dict=None):
"""Create a new feature in the QUEEN object.
Parameters
----------
attribute_dict : dict
Dictionary with key-value pairs for the attributes of DNAfeature objects:
"feature_id", "feature_type", "start", "end", "strand", and "qualifier:*".
The following attributes have default values, so if they are not specified in the
dictionary, they will be set to the defaults.
- `feature_id` : `str`,
The default value is a random unique ID which is not used in `.dnafeatures` of
the QUEEN object.
- `feature_type` : `str` (default: `"misc_feature"`)
- `start` : `int` (default: 0)
- `end` : `int` (default: length of the `QUEEN_object` sequence)
- `strand` : `int` (-1, 0 or 1, default: 1)
Returns
-------
self.dnafeatures: list of DNAfeature object
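Examples
--------
A minimal sketch; the coordinates and label below are hypothetical. Qualifier
keys use the "qualifier:" prefix handled by this method:
plasmid.setfeature({"feature_type": "promoter", "start": 100, "end": 135,
"strand": 1, "qualifier:label": "promoter-1"})
Unspecified attributes fall back to the defaults listed above.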
"""
if attribute_dict is None:
attribute_dict = {}
elif type(attribute_dict) == dict:
attribute_dict = copy.copy(attribute_dict)
attribute_dict.setdefault("feature_type", "misc_feature")
attribute_dict.setdefault("start", 0)
attribute_dict.setdefault("end", len(self.seq))
attribute_dict.setdefault("strand", 1)
if type(attribute_dict["start"]) == int:
if attribute_dict["start"] > attribute_dict["end"] and self.topology == "circular":
locations = [[attribute_dict["start"], len(self.seq), attribute_dict["strand"]], [0, attribute_dict["end"], attribute_dict["strand"]]]
if attribute_dict["strand"] == -1:
locations.reverse()
new_feat = SeqFeature(CompoundLocation(list(map(lambda x:FeatureLocation(*x), locations))), type=attribute_dict["feature_type"])
else:
new_feat = SeqFeature(CompoundLocation(list(map(lambda x:FeatureLocation(*x), locations))), type=attribute_dict["feature_type"])
else:
new_feat = SeqFeature(FeatureLocation(attribute_dict["start"], attribute_dict["end"], strand=attribute_dict["strand"]), type=attribute_dict["feature_type"])
elif type(attribute_dict["start"]) in (tuple, list):
locations = []
for s, e in zip(attribute_dict["start"], attribute_dict["end"]):
if s > e:
if self.topology == "circular":
locations.append((s, len(self.seq), attribute_dict["strand"]))
locations.append((0, e, attribute_dict["strand"]))
else:
raise ValueError("'end' position must be larger than 'start' position.")
else:
locations.append((s, e, attribute_dict["strand"]))
if attribute_dict["strand"] == -1:
locations.reverse()
new_feat = SeqFeature(CompoundLocation(list(map(lambda x:FeatureLocation(*x), locations))), type=attribute_dict["feature_type"])
else:
new_feat = SeqFeature(CompoundLocation(list(map(lambda x:FeatureLocation(*x), locations))), type=attribute_dict["feature_type"])
for key in attribute_dict:
if key[:len("qualifier:")] == "qualifier:":
subkey = key[len("qualifier:"):]
if subkey not in new_feat.qualifiers:
new_feat.qualifiers[subkey] = [attribute_dict[key]]
else:
new_feat.qualifiers[subkey].append(attribute_dict[key])
else:
pass
features_dict = self.features_dict
new_feat = DNAfeature(feature=new_feat, subject=self)
if "feature_id" not in attribute_dict:
feature_ids = [feat.feature_id for feat in self.dnafeatures if feat.feature_id.isdecimal()]
if len(feature_ids) == 0:
new_id = str(1)
for _id in feature_ids:
start = features_dict[_id].start
new_id = str(int(_id) + 1)
if attribute_dict["start"] > start:
while new_id in features_dict:
new_id = str(int(_id) + 1)
break
new_feat._id = new_id
else:
if attribute_dict["feature_id"] in set(features_dict.keys()):
raise ValueError("feature_id value should be uniqule in .dnafeatures.")
else:
new_feat._id = attribute_dict["feature_id"]
self._dnafeatures.append(new_feat)
def printfeature(self, feature_list=None, attribute=None, separation=None, detail=False, seq=False, output=None, x_based_index=0):
""" Print a tidy data table of annotation features/attributes of `QUEEN_object`.
Default output attributes are `"feature_id"`, `"feature_type"`,
`"qualifiers:label"`, `"start"`, `"end"`, and `"strand"`.
Parameters
----------
feature_list : list of QUEEN.qobj.DNAfeature objects (default: self.dnafeatures)
List of features to be displayed in the output table. If not given, all features
held by the QUEEN_object will be the subject.
attribute : list of feature attributes
The default value is `["feature_id", "feature_type", "qualifiers:label", "start",
"end", "strand"]`. List of feature attributes to be displayed in the output
table. If the value is `"all"`, it will generate a table for all the attributes
held by the `QUEEN_object` except for `"sequence"`.
seq : bool (True or False), default: `False`
If `True`, the sequence of each feature for its encoded direction will be
displayed in the output table.
separation : str, default: space(s) to generate a well-formatted table
String to separate values of each line.
output : str, default: STDOUT
Output file name.
x_based_index : 0 or 1, default: 0
As a default, positions of all features are given in the zero-based indexing in
QUEEN (same as Python). If this parameter is set to `1`, they will be shown in
the 1-based indexing (as seen in the GenBank format).
Returns
-------
None
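Examples
--------
A minimal sketch; `plasmid` and the output file name are hypothetical:
plasmid.printfeature()                                      #default table on STDOUT
plasmid.printfeature(attribute=["feature_id", "start", "end", "strand"], seq=True,
separation=",", output="features.csv")                      #CSV output including sequences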
"""
_ids = ["feature_id"]
types = ["feature_type"]
labels = ["qualifier:label"]
starts = ["start"]
ends = ["end"]
strands = ["strand"]
sequences = ["sequence"]
sep = separation
seqflag = seq
others_dict = {}
if attribute is None:
attribute = ["feature_id", "feature_type", "qualifier:label", "start", "end", "strand"]
new_attribute = []
for att in attribute:
if att == "$DEFAULT":
new_attribute += ["feature_id", "feature_type", "qualifier:label", "start", "end", "strand"]
else:
new_attribute.append(att)
attribute = new_attribute
if feature_list is None:
features = list(self.dnafeatures)
features.sort(key=lambda x:x.location.parts[0].start.position)
else:
features = feature_list
for feat in features:
flag = 0
label_keys = []
for key in feat.qualifiers:
if key == "label":
label = feat.qualifiers["label"][0]
flag = 1
elif ("qualifier:") + key not in others_dict and ((key in attribute or ("qualifier:" + key) in attribute) or detail==True):
others_dict["qualifier:" + key] = ["qualifier:" + key] + (len(labels)-1) * ["null"]
if flag == 0:
label = "null"
strand = feat.location.strand
start = feat.start
end = feat.end
seq = feat.sequence
if x_based_index == 1:
start += 1
if len(attribute) > 0:
for key in others_dict:
if key == "label":
pass
elif key in feat.qualifiers or key.replace("qualifier:","") in feat.qualifiers:
if type(feat.qualifiers[key.replace("qualifier:","")]) == list:
others_dict[key].append(":".join(feat.qualifiers[key.replace("qualifier:","")]))
else:
others_dict[key].append(feat.qualifiers[key.replace("qualifier:","")])
else:
others_dict[key].append("null")
if "_id" not in feat.__dict__:
feat._id = "null"
_ids.append(str(feat._id))
labels.append(str(label))
types.append(str(feat.type))
starts.append(str(start))
ends.append(str(end))
sequences.append(seq)
if strand == 1:
strands.append("+")
elif strand == 0:
strands.append("+")
else:
strands.append("-")
_idmax = max(list(map(len,_ids))) + 2
labelmax = max(list(map(len,labels))) + 2
ftypemax = max(list(map(len,types))) + 2
startmax = max(list(map(len,starts))) + 2
endmax = max(list(map(len,ends))) + 2
strandmax = max(list(map(len,strands))) + 2
sequencemax = max(list(map(len,sequences))) + 2
other_maxes = [max(list(map(len,others_dict[key]))) + 2 for key in others_dict]
dkeys = ["feature_id", "feature_type", "qualifier:label", "start", "end", "strand"] + list(others_dict.keys())
dvalues = [_ids, types, labels, starts, ends, strands] + list(others_dict.values())
dmaxes = [_idmax, ftypemax, labelmax, startmax, endmax, strandmax] + other_maxes
hogera = list(zip(dkeys, dvalues, dmaxes))
rows = []
maxes = []
if detail == False:
for key, value, max_w in zip(dkeys, dvalues, dmaxes):
if key.replace("qualifier:","") in attribute or key in attribute:
rows.append(value)
maxes.append(max_w)
else:
rows = [_ids, labels, types, starts, ends, strands] + list(others_dict.values())
maxes = [_idmax, labelmax, ftypemax, startmax, endmax, strandmax] + other_maxes
if seqflag == True and "sequence" not in attribute:
attribute.append("sequence")
if "sequence" in attribute:
rows.append(sequences)
maxes.append(sequencemax)
if type(output) is str:
output = open(output,"w")
if sep == ",":
import csv
output = csv.writer(output)
for n, row in enumerate(zip(*rows)):
if sep is None:
text = ""
for m, x in enumerate(row):
text += x + " " * (maxes[m]-len(x))
print(text, file=output)
else:
if sep == ",":
output.writerow(row)
else:
print(*row, sep=sep, file=output)
if output is None:
print()
def outputgbk(self, output=None, format="genbank", record_id=None, export_history=True, _return=False):
"""Output `QUEEN_object` to a GenBank file.
In addition to all of the `DNAfeature_objects` in the input `QUEEN_object`,
a `DNAfeature_object` encoding the entire construction processes that generated
the `QUEEN_object` in `qualifiers:building_history` will also be output to the
GenBank file.
Parameters
----------
output : str, default: STDOUT
Output file name.
format : str, default: "genbank"
Output file format
record_id : str, default: None
Record ID of the output file. If the value is `None`, the self.project value is used as `record_id`.
export_history : bool, default: True
If False, the construction history of the `QUEEN_object` will not be output.
Returns
-------
None
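Examples
--------
A minimal sketch; the file name is hypothetical:
plasmid.outputgbk("plasmid_with_history.gbk")   #write a GenBank file including the building history
gbk_text = plasmid.outputgbk(_return=True)      #with no output file, return the GenBank text as a string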
"""
handle = output
export_input = False
separate_history = False
annotations = None
stdIOflag = 0
if handle is None:
stdIOflag = 1
handle = io.StringIO()
product_dict = {}
histories = quine(self, _return=True)
history_nums = [history[0] for history in histories]
history_feature = copy.deepcopy(self._history_feature)
remove_keys = []
for key in history_feature.qualifiers:
if "building_history" in key[0:18]:
num = int(key.split("_")[-1])
if num in history_nums:
process_id = history_feature.qualifiers[key][2].split(",")[0].replace(" ", "")
if "-" in process_id:
pass
else:
history_feature.qualifiers[key][2] = self.project + "-" + self._history_feature.qualifiers[key][2]
else:
remove_keys.append(key)
for key in remove_keys:
del history_feature.qualifiers[key]
features = copy.deepcopy(self.dnafeatures)
if export_history is True:
features.append(history_feature)
for feat in features:
if "broken_feature" in feat.qualifiers:
note = feat.qualifiers["broken_feature"][0]
label = ":".join(note.split(":")[:-1])
length = int(note.split(":")[-4])
pos_s = int(note.split(":")[-1].split("..")[0].replace(" ",""))
pos_e = int(note.split(":")[-1].split("..")[1].replace(" ",""))
if pos_e > length:
note = label + ":" + str(pos_s) + ".." + str(pos_e-length)
feat.qualifiers["broken_feature"] = [note]
elif (pos_s == 1 and pos_e == length) or (pos_s == length and pos_e == 1):
del feat.qualifiers["broken_feature"]
if separate_history is not False and type(separate_history) is str and export_history == True:
for feat in self.dnafeatures:
if feat.type == "source":
for key in feat.qualifiers:
if "building_history" in key[0:18] and import_history == True:
history = feat.qualifiers[key][0]
results = re.findall("QUEEN.dna_dict\['[^\[\]]+'\]", history)
history_num = int(key.split("_")[-1])
pairs.append((feat, history_num, history))
with open(separate_history, "w") as o:
print("This file describes the buiding history of '{}/{}'.".format(os.getcwd(), handle), file=o)
for pair in pairs:
del feat.qualifiers["building_history" + "_" + str(pair[1])]
print(pair[1], pair[2], sep=",", file=o)
self.record.annotations["source"] = os.getcwd() + "/" + separate_history
if export_input == True:
for history in histories:
if "QUEEN(record=" in history[1] and ("dbtype='local'" in history[1] or "dbtype" not in history[1]):
match = re.search("QUEEN.dna_dict\['([^\[\]]+)'\]", history[1])
if match is not None:
key = match.group(1)
if self.productdict[key] is not None:
product_dict[key] = self.productdict[key]
product_dict[key].record.annotations["keyword"] = "QUEEN input"
product_dict[key].record.annotations["accession"] = re.search("record='([^=]+)'[,\)]", history[1]).group(1)
else:
pass
else:
pass
if type(handle) is str:
handle = open(handle, "w")
self.record.features = features
if record_id is None:
self.record.id = self.project
else:
self.record.id = record_id
if Alphabet:
self.record.seq = Seq(str(self.seq),Alphabet.DNAAlphabet())
else:
self.record.seq = Seq(str(self.seq))
self.record.annotations["molecule_type"] = "DNA"
#Add DATE
import datetime
dt = datetime.datetime.now()
self.record.annotations["date"] = dt
SeqIO.write(self.record, handle, format)
self.record.features = self.dnafeatures
if stdIOflag == 1:
if _return == True:
return(handle.getvalue())
else:
print(handle.getvalue(), end="")
if export_input == True and len(product_dict) > 0:
for key in product_dict:
value = outputgbk(product_dict[key], export_input=False, _return=True)
if stdIOflag == 1:
#print("//")
print(value, end="")
else:
#handle.write("//\n")
handle.write(value)
if stdIOflag == 0:
handle.close()
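# A minimal usage sketch (hypothetical object and file names; assumes `outputgbk`
# is exposed as a method of a QUEEN object, as the docstring's use of `self` suggests):
#
#     plasmid.outputgbk(output="construct.gbk", format="genbank",
#                       record_id="construct", export_history=True)
#
# With output=None the GenBank-formatted record is printed to STDOUT instead.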
| 48.585679
| 311
| 0.52043
|
cffd0da4027e4b232e1508643a81074271708563
| 2,313
|
py
|
Python
|
model_zoo/official/recommend/wide_and_deep/export.py
|
dongkcs/mindspore
|
cd7df6dbf463ff3128e9181e9d0c779cecb81320
|
[
"Apache-2.0"
] | 12
|
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
model_zoo/official/recommend/wide_and_deep/export.py
|
dongkcs/mindspore
|
cd7df6dbf463ff3128e9181e9d0c779cecb81320
|
[
"Apache-2.0"
] | 3
|
2021-03-31T20:15:40.000Z
|
2022-02-09T23:50:46.000Z
|
model_zoo/official/recommend/wide_and_deep/export.py
|
dongkcs/mindspore
|
cd7df6dbf463ff3128e9181e9d0c779cecb81320
|
[
"Apache-2.0"
] | 2
|
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
##############export checkpoint file into air and onnx models#################
"""
import numpy as np
from mindspore import Tensor, nn
from mindspore.ops import operations as P
from mindspore.train.serialization import load_checkpoint, load_param_into_net, export
from src.wide_and_deep import WideDeepModel
from src.config import WideDeepConfig
class PredictWithSigmoid(nn.Cell):
"""
PredictWithSigmoid
"""
def __init__(self, network):
super(PredictWithSigmoid, self).__init__()
self.network = network
self.sigmoid = P.Sigmoid()
def construct(self, batch_ids, batch_wts):
logits, _, = self.network(batch_ids, batch_wts)
pred_probs = self.sigmoid(logits)
return pred_probs
def get_WideDeep_net(config):
"""
Get network of wide&deep predict model.
"""
WideDeep_net = WideDeepModel(config)
eval_net = PredictWithSigmoid(WideDeep_net)
return eval_net
if __name__ == '__main__':
widedeep_config = WideDeepConfig()
widedeep_config.argparse_init()
ckpt_path = widedeep_config.ckpt_path
net = get_WideDeep_net(widedeep_config)
param_dict = load_checkpoint(ckpt_path)
load_param_into_net(net, param_dict)
ids = Tensor(np.ones([widedeep_config.eval_batch_size, widedeep_config.field_size]).astype(np.int32))
wts = Tensor(np.ones([widedeep_config.eval_batch_size, widedeep_config.field_size]).astype(np.float32))
input_tensor_list = [ids, wts]
export(net, *input_tensor_list, file_name='wide_and_deep.onnx', file_format="ONNX")
export(net, *input_tensor_list, file_name='wide_and_deep.air', file_format="AIR")
| 37.918033
| 107
| 0.710333
|
3900d9d036056539316824b2cb114e51443c2dd9
| 3,795
|
py
|
Python
|
ckanext/cloudstorage/cli.py
|
ranierigmusella/ckanext-cloudstorage
|
83c959d731c87a6f5d6fd08628b5714d637324c0
|
[
"MIT"
] | 1
|
2017-03-23T00:13:24.000Z
|
2017-03-23T00:13:24.000Z
|
ckanext/cloudstorage/cli.py
|
ranierigmusella/ckanext-cloudstorage
|
83c959d731c87a6f5d6fd08628b5714d637324c0
|
[
"MIT"
] | 1
|
2017-05-04T13:50:41.000Z
|
2017-05-04T13:50:41.000Z
|
ckanext/cloudstorage/cli.py
|
ranierigmusella/ckanext-cloudstorage
|
83c959d731c87a6f5d6fd08628b5714d637324c0
|
[
"MIT"
] | 5
|
2016-11-25T11:17:27.000Z
|
2020-11-26T10:46:12.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path
import cgi
from docopt import docopt
from ckan.lib.cli import CkanCommand
from ckanapi import LocalCKAN
from ckanext.cloudstorage.storage import (
CloudStorage,
ResourceCloudStorage
)
from ckanext.cloudstorage.model import (
create_tables,
drop_tables
)
USAGE = """ckanext-cloudstorage
Commands:
- fix-cors Update CORS rules where possible.
- migrate Upload local storage to the remote.
- initdb Reinitialize database tables.
Usage:
cloudstorage fix-cors <domains>... [--c=<config>]
cloudstorage migrate <path_to_storage> [--c=<config>]
cloudstorage initdb [--c=<config>]
Options:
-c=<config> The CKAN configuration file.
"""
class FakeFileStorage(cgi.FieldStorage):
def __init__(self, fp, filename):
self.file = fp
self.filename = filename
class PasterCommand(CkanCommand):
summary = 'ckanext-cloudstorage maintenance utilities.'
usage = USAGE
def command(self):
self._load_config()
args = docopt(USAGE, argv=self.args)
if args['fix-cors']:
_fix_cors(args)
elif args['migrate']:
_migrate(args)
elif args['initdb']:
_initdb()
def _migrate(args):
path = args['<path_to_storage>']
if not os.path.isdir(path):
print('The storage directory cannot be found.')
return
lc = LocalCKAN()
resources = {}
# The resource folder is structured like so on disk:
# - storage/
# - ...
# - resources/
# - <3 letter prefix>
# - <3 letter prefix>
# - <remaining resource_id as filename>
# ...
# ...
# ...
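# For example, a (hypothetical) resource id "0aa2b4f7-3f14-4c8e-a0c4-9f7e1d2b3c4d"
# would be stored as
#     <path_to_storage>/resources/0aa/2b4/f7-3f14-4c8e-a0c4-9f7e1d2b3c4d
# and the loop below rebuilds its id as split_root[-2] + split_root[-1] + filename.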
for root, dirs, files in os.walk(path):
# Only the bottom level of the tree actually contains any files. We
# don't care at all about the overall structure.
if not files:
continue
split_root = root.split('/')
resource_id = split_root[-2] + split_root[-1]
for file_ in files:
resources[resource_id + file_] = os.path.join(
root,
file_
)
for i, resource in enumerate(resources.iteritems(), 1):
resource_id, file_path = resource
print('[{i}/{count}] Working on {id}'.format(
i=i,
count=len(resources),
id=resource_id
))
resource = lc.action.resource_show(id=resource_id)
if resource['url_type'] != 'upload':
continue
with open(os.path.join(root, file_path), 'rb') as fin:
resource['upload'] = FakeFileStorage(
fin,
resource['url'].split('/')[-1]
)
uploader = ResourceCloudStorage(resource)
uploader.upload(resource['id'])
def _fix_cors(args):
cs = CloudStorage()
if cs.can_use_advanced_azure:
from azure.storage import blob as azure_blob
from azure.storage import CorsRule
blob_service = azure_blob.BlockBlobService(
cs.driver_options['key'],
cs.driver_options['secret']
)
blob_service.set_blob_service_properties(
cors=[
CorsRule(
allowed_origins=args['<domains>'],
allowed_methods=['GET']
)
]
)
print('Done!')
else:
print(
'The driver {driver_name} being used does not currently'
' support updating CORS rules through'
' cloudstorage.'.format(
driver_name=cs.driver_name
)
)
def _initdb():
drop_tables()
create_tables()
print("DB tables are reinitialized")
| 25.13245
| 75
| 0.571014
|
15c4224bca13b1b0f4c209bbb5d43143ca1e7b09
| 355
|
py
|
Python
|
bafd/sprites/metals/__init__.py
|
TEParsons/BronzeAgeFashionDesigner
|
7a48f84290802d3b9470dffa127550de7c2b360e
|
[
"MIT"
] | null | null | null |
bafd/sprites/metals/__init__.py
|
TEParsons/BronzeAgeFashionDesigner
|
7a48f84290802d3b9470dffa127550de7c2b360e
|
[
"MIT"
] | null | null | null |
bafd/sprites/metals/__init__.py
|
TEParsons/BronzeAgeFashionDesigner
|
7a48f84290802d3b9470dffa127550de7c2b360e
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from pygame import image
from . import ores, ingots
# Get files
sprites = {}
for sprite in Path(__file__).parent.glob("*.png"):
sprites[sprite.stem] = image.load(str(sprite))
# Append files to module namespace according to filename
globals().update(sprites)
# Update module namespace
__all__ = ["__folder__"] + list(sprites)
| 27.307692
| 56
| 0.743662
|
491c3965d56305cec433e18b427cf4f4c04077df
| 1,879
|
py
|
Python
|
src/main.py
|
rubenochiavone/ternary-rk-fit
|
b378b3a6951660b7a5fa6708ad85ee55220607e6
|
[
"MIT"
] | null | null | null |
src/main.py
|
rubenochiavone/ternary-rk-fit
|
b378b3a6951660b7a5fa6708ad85ee55220607e6
|
[
"MIT"
] | null | null | null |
src/main.py
|
rubenochiavone/ternary-rk-fit
|
b378b3a6951660b7a5fa6708ad85ee55220607e6
|
[
"MIT"
] | null | null | null |
import sys
import subprocess
import os
import json
from Config import Config
import lmfit
from lmfit import Minimizer
from OutputFormatter import OutputFormatter
from TernaryRKModel import TernaryRKModel
argc = len(sys.argv)
verbose = False
if argc > 1:
for i in range(argc):
if sys.argv[i] == "-v" or sys.argv[i] == "--verbose":
verbose = True
else:
configFileName = sys.argv[i]
else:
print "Error! An input file must be specified. Call it './ternary-rk-fit <path_to_config_file>'."
sys.exit()
# unbuffer output
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
logFile = os.path.splitext(configFileName)[0] + ".out"
print "Writing output to './{}' ...".format(logFile)
# tee output to both stdout and file
tee = subprocess.Popen(["tee", logFile], stdin=subprocess.PIPE)
os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
os.dup2(tee.stdin.fileno(), sys.stderr.fileno())
# print program header
OutputFormatter.printHeader()
# open config file
configFile = open(configFileName, 'r', -1)
# parse json data from config file
configJson = json.loads(configFile.read())
# close config file
configFile.close()
# print config JSON info
OutputFormatter.printConfig(configJson)
config = Config(configJson)
# retrieve params from config
equationModel = config.getEquationModel()
params = config.getParams()
data = config.getData()
exp = config.getExp()
OutputFormatter.printExperimentalData(data, exp)
minimizer = Minimizer(equationModel.residual, params, fcn_args=(data, exp, verbose))
out = minimizer.leastsq()
# show output
#lmfit.printfuncs.report_fit(out.params)
print(lmfit.fit_report(out))
# confidence
# ci = lmfit.conf_interval(minimizer, out)
# show output
# lmfit.printfuncs.report_ci(ci)
calc = equationModel.model(params, data, False)
# print results
OutputFormatter.printResults(out, data, exp, calc)
| 23.4875
| 101
| 0.729643
|
a04577222c781e4f84de776e028e3f9f0c719b7f
| 3,934
|
py
|
Python
|
python_experiments/data_analysis/aec_algorithm/k_truss_PP_SI_time.py
|
mexuaz/AccTrussDecomposition
|
15a9e8fd2f123f5acace5f3b40b94f1a74eb17d4
|
[
"MIT"
] | 9
|
2020-03-30T13:00:15.000Z
|
2022-03-17T13:40:17.000Z
|
python_experiments/data_analysis/aec_algorithm/k_truss_PP_SI_time.py
|
mexuaz/AccTrussDecomposition
|
15a9e8fd2f123f5acace5f3b40b94f1a74eb17d4
|
[
"MIT"
] | null | null | null |
python_experiments/data_analysis/aec_algorithm/k_truss_PP_SI_time.py
|
mexuaz/AccTrussDecomposition
|
15a9e8fd2f123f5acace5f3b40b94f1a74eb17d4
|
[
"MIT"
] | 2
|
2020-08-17T10:05:51.000Z
|
2020-08-30T22:57:55.000Z
|
from data_analysis.util.folder_init import init_folder_md_json_file
from data_analysis.util.get_configurations import get_config_dict_via_hostname
from data_analysis.util.read_file_utils_updated import *
from data_analysis.util.parsing_helpers import *
from data_analysis.aec_algorithm.local_config.ktruss_exec_tags import *
from data_analysis.aec_algorithm.local_config.load_data import *
from config import *
from exec_utilities import exec_utils
import json
user_output_md_file = '2020-01-10-CPU-PP-SI.md'
config_lst = ['exp-2020-01-09', [
'pkt-eid',
'pkt-eid-parallel',
'pkt-eval-tc-baseline',
'pkt-eval-tc-dtc',
'pkt-eval-tc-wp',
'cuda-pkt-offload'], ['gpu24']]
PP_SI_tag_lst = ['TC Time', 'End-To-End Time', 'Init Eid Time']
def fetch_statistics(root_dir, dataset_lst, reorder_tag, t_lst, algorithm, json_file_path):
# Dataset -> Thread Num -> Detailed Time Info
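# e.g. (hypothetical dataset name and timings):
#   {"webgraph_eu": {"64": {"TC Time": 1.2, "End-To-End Time": 3.4, "Init Eid Time": 0.1}}}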
my_dict = dict()
for dataset in dataset_lst:
my_dict[dataset] = dict()
for t_num in t_lst:
file_path = os.sep.join([root_dir, dataset, reorder_tag, t_num, algorithm + '.log'])
logger.info(file_path)
lines = get_file_lines(file_path)
my_dict[dataset][t_num] = parse_lines_cuda(lines, PP_SI_tag_lst)
with open(json_file_path, 'w') as ofs:
ofs.write(json.dumps(my_dict, indent=4))
def generate_md(dataset_lst, json_file_path, logger, output_md_file):
data_names = get_data_set_names('local_config')
with open(json_file_path) as ifs:
data_dict = json.load(ifs)
# t_num = str(max(map(int, t_lst)))
t_num = str(64)
lines = [['Dataset'] + PP_SI_tag_lst, ['---' for _ in range(len(PP_SI_tag_lst) + 1)]]
# Dataset -> Thread Num -> Detailed Time Info
for data_set in dataset_lst:
time_lst = [data_dict[data_set][t_num][k] for k in PP_SI_tag_lst]
if len(list(filter(lambda e: e is None, time_lst))) == 1 and time_lst[-1] is None:
time_lst[-1] = sum(time_lst[:-1])
lines.append([data_names[data_set]] + time_lst)
bold_line = '-'.join([my_md_algorithm_name, '(' + get_comment(my_md_algorithm_name) + ')'])
generate_md_table_from_lines(bold_line, lines, logger, output_md_file)
if __name__ == '__main__':
base_dir = '/home/yche/'
# base_dir = '/Users/cheyulin/'
os.system('mkdir -p {}logs/'.format(base_dir))
my_res_log_file_folder = config_lst[0]
my_gpu_lst = config_lst[2]
for hostname in my_gpu_lst:
app_md_path = init_folder_md_json_file('..', hostname, user_output_md_file)
for my_md_algorithm_name in config_lst[1]:
json_file_path = my_res_log_file_folder + '-' + my_md_algorithm_name + '.json'
json_file_path = os.sep.join(['../data-json/', hostname, json_file_path])
log_path = my_res_log_file_folder + '-' + my_md_algorithm_name + '.log'
logger = exec_utils.get_logger('{}logs/'.format(base_dir) + log_path, __name__)
with open(app_md_path, 'a+') as output_md_file:
# Dataset -> Thread Num -> Detailed Time Info
config_dict = get_config_dict_via_hostname(hostname)
root_dir = '{}mnt/luocpu9/mnt/storage1/yche/git-repos/' \
'OutOfCoreSCAN/python_experiments/exp_results/{}/{}'. \
format(base_dir, 'exp-2020-01-09', 'gpu24')
dataset_lst = load_data_sets()
reorder_tag = 'org'
t_lst = list(map(str, config_dict[thread_num_lst_tag]))
# Fetch data and parse it as a markdown file
fetch_statistics(root_dir=root_dir, dataset_lst=dataset_lst, reorder_tag=reorder_tag, t_lst=t_lst,
algorithm=my_md_algorithm_name, json_file_path=json_file_path)
generate_md(dataset_lst, json_file_path, logger, output_md_file)
| 44.704545
| 114
| 0.658617
|
db11e6332203be0955b6844a0c90ad135f081587
| 4,232
|
py
|
Python
|
hparams.py
|
eloqute/WaveRNN
|
036674b2e3745e22f15f6f945661f5f9d8a63003
|
[
"MIT"
] | null | null | null |
hparams.py
|
eloqute/WaveRNN
|
036674b2e3745e22f15f6f945661f5f9d8a63003
|
[
"MIT"
] | null | null | null |
hparams.py
|
eloqute/WaveRNN
|
036674b2e3745e22f15f6f945661f5f9d8a63003
|
[
"MIT"
] | null | null | null |
# CONFIG -----------------------------------------------------------------------------------------------------------#
# Here are the input and output data paths (Note: you can override wav_path in preprocess.py)
wav_path = '/path/to/wav_files/'
data_path = 'data/'
# model ids are separate - that way you can use a new tts with an old wavernn and vice versa
# NB: expect undefined behaviour if models were trained on different DSP settings
voc_model_id = 'ljspeech_mol'
tts_model_id = 'ljspeech_lsa_smooth_attention'
# set this to True if you are only interested in WaveRNN
ignore_tts = False
# DSP --------------------------------------------------------------------------------------------------------------#
# Settings for all models
sample_rate = 22050
n_fft = 2048
fft_bins = n_fft // 2 + 1
num_mels = 80
hop_length = 275 # 12.5ms - in line with Tacotron 2 paper
win_length = 1100 # 50ms - same reason as above
fmin = 40
min_level_db = -100
ref_level_db = 20
bits = 9 # bit depth of signal
mu_law = True # Recommended to suppress noise if using raw bits in hp.voc_mode below
peak_norm = False # Normalise to the peak of each wav file
# WAVERNN / VOCODER ------------------------------------------------------------------------------------------------#
# Model Hparams
voc_mode = 'MOL' # either 'RAW' (softmax on raw bits) or 'MOL' (sample from mixture of logistics)
voc_upsample_factors = (5, 5, 11) # NB - this needs to correctly factorise hop_length
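# sanity check: 5 * 5 * 11 == 275 == hop_length defined above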
voc_rnn_dims = 512
voc_fc_dims = 512
voc_compute_dims = 128
voc_res_out_dims = 128
voc_res_blocks = 10
# Training
voc_batch_size = 32
voc_lr = 1e-4
voc_checkpoint_every = 25_000
voc_gen_at_checkpoint = 5 # number of samples to generate at each checkpoint
voc_total_steps = 1_000_000 # Total number of training steps
voc_test_samples = 50 # How many unseen samples to put aside for testing
voc_pad = 2 # this will pad the input so that the resnet can 'see' wider than input length
voc_seq_len = hop_length * 5 # must be a multiple of hop_length
voc_clip_grad_norm = 4 # set to None if no gradient clipping needed
# Generating / Synthesizing
voc_gen_batched = True # very fast (realtime+) single utterance batched generation
voc_target = 11_000 # target number of samples to be generated in each batch entry
voc_overlap = 550 # number of samples for crossfading between batches
# TACOTRON/TTS -----------------------------------------------------------------------------------------------------#
# Model Hparams
tts_embed_dims = 256 # embedding dimension for the graphemes/phoneme inputs
tts_encoder_dims = 128
tts_decoder_dims = 256
tts_postnet_dims = 128
tts_encoder_K = 16
tts_lstm_dims = 512
tts_postnet_K = 8
tts_num_highways = 4
tts_dropout = 0.5
tts_cleaner_names = ['english_cleaners']
tts_stop_threshold = -3.4 # Value below which audio generation ends.
# For example, for a range of [-4, 4], this
# will terminate the sequence at the first
# frame that has all values < -3.4
# Training
tts_schedule = [(7, 1e-3, 10_000, 32), # progressive training schedule
(5, 1e-4, 100_000, 32), # (r, lr, step, batch_size)
(2, 1e-4, 180_000, 16),
(2, 1e-4, 350_000, 8)]
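# e.g. the first entry trains with reduction factor r=7 at lr=1e-3 and batch size 32
# until step 10_000, after which the next entry (r=5, lr=1e-4, batch size 32) takes
# over until step 100_000, and so on.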
tts_max_mel_len = 1250 # if you have a couple of extremely long spectrograms you might want to use this
tts_bin_lengths = True # bins the spectrogram lengths before sampling in data loader - speeds up training
tts_clip_grad_norm = 1.0 # clips the gradient norm to prevent explosion - set to None if not needed
tts_checkpoint_every = 2_000 # checkpoints the model every X steps
# TODO: tts_phoneme_prob = 0.0 # [0 <-> 1] probability for feeding model phonemes vrs graphemes
# ------------------------------------------------------------------------------------------------------------------#
| 44.083333
| 118
| 0.573015
|
472e2aafd9caa885a43134135931bdd4778e5d1b
| 10,745
|
py
|
Python
|
tests/regressiontests/transactions_regress/tests.py
|
wnyc/django
|
470deb5cbb765e2e731c5b0b184247c7f87482aa
|
[
"BSD-3-Clause"
] | 1
|
2022-02-05T13:41:30.000Z
|
2022-02-05T13:41:30.000Z
|
tests/regressiontests/transactions_regress/tests.py
|
wnyc/django
|
470deb5cbb765e2e731c5b0b184247c7f87482aa
|
[
"BSD-3-Clause"
] | 1
|
2016-02-19T00:22:18.000Z
|
2016-02-19T00:22:18.000Z
|
tests/regressiontests/transactions_regress/tests.py
|
wnyc/django
|
470deb5cbb765e2e731c5b0b184247c7f87482aa
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from django.db import connection, connections, transaction, DEFAULT_DB_ALIAS, DatabaseError
from django.db.transaction import commit_on_success, commit_manually, TransactionManagementError
from django.test import TransactionTestCase, skipUnlessDBFeature
from django.test.utils import override_settings
from django.utils.unittest import skipIf, skipUnless
from .models import Mod, M2mA, M2mB
class TestTransactionClosing(TransactionTestCase):
"""
Tests to make sure that transactions are properly closed
when they should be, and aren't left pending after operations
have been performed in them. Refs #9964.
"""
def test_raw_committed_on_success(self):
"""
Make sure a transaction consisting of raw SQL execution gets
committed by the commit_on_success decorator.
"""
@commit_on_success
def raw_sql():
"Write a record using raw sql under a commit_on_success decorator"
cursor = connection.cursor()
cursor.execute("INSERT into transactions_regress_mod (id,fld) values (17,18)")
raw_sql()
# Rollback so that if the decorator didn't commit, the record is unwritten
transaction.rollback()
try:
# Check that the record is in the DB
obj = Mod.objects.get(pk=17)
self.assertEqual(obj.fld, 18)
except Mod.DoesNotExist:
self.fail("transaction with raw sql not committed")
def test_commit_manually_enforced(self):
"""
Make sure that under commit_manually, even "read-only" transactions require closure
(commit or rollback), and a transaction left pending is treated as an error.
"""
@commit_manually
def non_comitter():
"Execute a managed transaction with read-only operations and fail to commit"
_ = Mod.objects.count()
self.assertRaises(TransactionManagementError, non_comitter)
def test_commit_manually_commit_ok(self):
"""
Test that under commit_manually, a committed transaction is accepted by the transaction
management mechanisms
"""
@commit_manually
def committer():
"""
Perform a database query, then commit the transaction
"""
_ = Mod.objects.count()
transaction.commit()
try:
committer()
except TransactionManagementError:
self.fail("Commit did not clear the transaction state")
def test_commit_manually_rollback_ok(self):
"""
Test that under commit_manually, a rolled-back transaction is accepted by the transaction
management mechanisms
"""
@commit_manually
def roller_back():
"""
Perform a database query, then rollback the transaction
"""
_ = Mod.objects.count()
transaction.rollback()
try:
roller_back()
except TransactionManagementError:
self.fail("Rollback did not clear the transaction state")
def test_commit_manually_enforced_after_commit(self):
"""
Test that under commit_manually, if a transaction is committed and an operation is
performed later, we still require the new transaction to be closed
"""
@commit_manually
def fake_committer():
"Query, commit, then query again, leaving with a pending transaction"
_ = Mod.objects.count()
transaction.commit()
_ = Mod.objects.count()
self.assertRaises(TransactionManagementError, fake_committer)
@skipUnlessDBFeature('supports_transactions')
def test_reuse_cursor_reference(self):
"""
Make sure transaction closure is enforced even when the queries are performed
through a single cursor reference retrieved in the beginning
(this is to show why it is wrong to set the transaction dirty only when a cursor
is fetched from the connection).
"""
@commit_on_success
def reuse_cursor_ref():
"""
Fetch a cursor, perform an query, rollback to close the transaction,
then write a record (in a new transaction) using the same cursor object
(reference). All this under commit_on_success, so the second insert should
be committed.
"""
cursor = connection.cursor()
cursor.execute("INSERT into transactions_regress_mod (id,fld) values (1,2)")
transaction.rollback()
cursor.execute("INSERT into transactions_regress_mod (id,fld) values (1,2)")
reuse_cursor_ref()
# Rollback so that if the decorator didn't commit, the record is unwritten
transaction.rollback()
try:
# Check that the record is in the DB
obj = Mod.objects.get(pk=1)
self.assertEqual(obj.fld, 2)
except Mod.DoesNotExist:
self.fail("After ending a transaction, cursor use no longer sets dirty")
def test_failing_query_transaction_closed(self):
"""
Make sure that under commit_on_success, a transaction is rolled back even if
the first database-modifying operation fails.
This is prompted by http://code.djangoproject.com/ticket/6669 (and based on sample
code posted there to exemplify the problem): Before Django 1.3,
transactions were only marked "dirty" by the save() function after it successfully
wrote the object to the database.
"""
from django.contrib.auth.models import User
@transaction.commit_on_success
def create_system_user():
"Create a user in a transaction"
user = User.objects.create_user(username='system', password='iamr00t', email='root@SITENAME.com')
# Redundant, just makes sure the user id was read back from DB
Mod.objects.create(fld=user.id)
# Create a user
create_system_user()
with self.assertRaises(DatabaseError):
# The second call to create_system_user should fail for violating
# a unique constraint (it's trying to re-create the same user)
create_system_user()
# Try to read the database. If the last transaction was indeed closed,
# this should cause no problems
User.objects.all()[0]
@override_settings(DEBUG=True)
def test_failing_query_transaction_closed_debug(self):
"""
Regression for #6669. Same test as above, with DEBUG=True.
"""
self.test_failing_query_transaction_closed()
@skipUnless(connection.vendor == 'postgresql',
"This test only valid for PostgreSQL")
class TestPostgresAutocommit(TransactionTestCase):
"""
Tests to make sure psycopg2's autocommit mode is restored after entering
and leaving transaction management. Refs #16047.
"""
def setUp(self):
from psycopg2.extensions import (ISOLATION_LEVEL_AUTOCOMMIT,
ISOLATION_LEVEL_READ_COMMITTED)
self._autocommit = ISOLATION_LEVEL_AUTOCOMMIT
self._read_committed = ISOLATION_LEVEL_READ_COMMITTED
# We want a clean backend with autocommit = True, so
# first we need to do a bit of work to have that.
self._old_backend = connections[DEFAULT_DB_ALIAS]
settings = self._old_backend.settings_dict.copy()
opts = settings['OPTIONS'].copy()
opts['autocommit'] = True
settings['OPTIONS'] = opts
new_backend = self._old_backend.__class__(settings, DEFAULT_DB_ALIAS)
connections[DEFAULT_DB_ALIAS] = new_backend
def tearDown(self):
connections[DEFAULT_DB_ALIAS] = self._old_backend
def test_initial_autocommit_state(self):
self.assertTrue(connection.features.uses_autocommit)
self.assertEqual(connection.isolation_level, self._autocommit)
def test_transaction_management(self):
transaction.enter_transaction_management()
transaction.managed(True)
self.assertEqual(connection.isolation_level, self._read_committed)
transaction.leave_transaction_management()
self.assertEqual(connection.isolation_level, self._autocommit)
def test_transaction_stacking(self):
transaction.enter_transaction_management()
transaction.managed(True)
self.assertEqual(connection.isolation_level, self._read_committed)
transaction.enter_transaction_management()
self.assertEqual(connection.isolation_level, self._read_committed)
transaction.leave_transaction_management()
self.assertEqual(connection.isolation_level, self._read_committed)
transaction.leave_transaction_management()
self.assertEqual(connection.isolation_level, self._autocommit)
class TestManyToManyAddTransaction(TransactionTestCase):
def test_manyrelated_add_commit(self):
"Test for https://code.djangoproject.com/ticket/16818"
a = M2mA.objects.create()
b = M2mB.objects.create(fld=10)
a.others.add(b)
# We're in a TransactionTestCase and have not changed transaction
# behavior from default of "autocommit", so this rollback should not
# actually do anything. If it does in fact undo our add, that's a bug
# that the bulk insert was not auto-committed.
transaction.rollback()
self.assertEqual(a.others.count(), 1)
class SavepointTest(TransactionTestCase):
@skipUnlessDBFeature('uses_savepoints')
def test_savepoint_commit(self):
@commit_manually
def work():
mod = Mod.objects.create(fld=1)
pk = mod.pk
sid = transaction.savepoint()
mod1 = Mod.objects.filter(pk=pk).update(fld=10)
transaction.savepoint_commit(sid)
mod2 = Mod.objects.get(pk=pk)
transaction.commit()
self.assertEqual(mod2.fld, 10)
work()
@skipIf(connection.vendor == 'mysql' and \
connection.features._mysql_storage_engine == 'MyISAM',
"MyISAM MySQL storage engine doesn't support savepoints")
@skipUnlessDBFeature('uses_savepoints')
def test_savepoint_rollback(self):
@commit_manually
def work():
mod = Mod.objects.create(fld=1)
pk = mod.pk
sid = transaction.savepoint()
mod1 = Mod.objects.filter(pk=pk).update(fld=20)
transaction.savepoint_rollback(sid)
mod2 = Mod.objects.get(pk=pk)
transaction.commit()
self.assertEqual(mod2.fld, 1)
work()
| 39.503676
| 109
| 0.660679
|
af586a9751d7d13df9e631f3b53daf05bf4ecab8
| 900
|
py
|
Python
|
codes/Archive/recalculate_optimal_solution.py
|
htalebiyan/Dec2py
|
8c4181eb92d6e52aef8cc804c485865516cee200
|
[
"MIT"
] | null | null | null |
codes/Archive/recalculate_optimal_solution.py
|
htalebiyan/Dec2py
|
8c4181eb92d6e52aef8cc804c485865516cee200
|
[
"MIT"
] | null | null | null |
codes/Archive/recalculate_optimal_solution.py
|
htalebiyan/Dec2py
|
8c4181eb92d6e52aef8cc804c485865516cee200
|
[
"MIT"
] | null | null | null |
import pickle
import indp
root = '/home/hesam/Desktop/Files/Game_Shelby_County/results/ng_results_L4_m92_v12_OPTIMISTIC_OPTIMAL/'
with open(root+'objs_30.pkl', 'rb') as f:
obj = pickle.load(f)
BASE_DIR = "../data/Extended_Shelby_County/"
DAMAGE_DIR = "../data/Wu_Damage_scenarios/"
obj.net, _, _ = indp.initialize_network(BASE_DIR=BASE_DIR,
external_interdependency_dir=None,
sim_number=0, magnitude=6, sample=0, v=12,
shelby_data='shelby_extended')
indp.add_Wu_failure_scenario(obj.net, DAM_DIR=DAMAGE_DIR,
noSet=30, noSce=92)
N_hat_prime = [n for n in obj.net.G.nodes(data=True) if n[1]['data']['inf_data'].repaired == 0.0]
for t in range(obj.time_steps):
obj.objs[t+1].find_optimal_solution()
print(obj.objs[t+1].optimal_solution['total cost'])
indp.apply_recovery(obj.net, obj.results, t+1)
obj.save_object_to_file()
| 42.857143
| 103
| 0.704444
|
70c89d8969b611bcd3e6080b1a85f04a982317a9
| 3,710
|
py
|
Python
|
nova/tests/api/openstack/volume/contrib/test_types_manage.py
|
bopopescu/extra-specs-1
|
6a14d8d7807727023b4d589af47e8a9605f12db1
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/api/openstack/volume/contrib/test_types_manage.py
|
bopopescu/extra-specs-1
|
6a14d8d7807727023b4d589af47e8a9605f12db1
|
[
"Apache-2.0"
] | 1
|
2020-07-24T14:14:13.000Z
|
2020-07-24T14:14:13.000Z
|
nova/tests/api/openstack/volume/contrib/test_types_manage.py
|
bopopescu/extra-specs-1
|
6a14d8d7807727023b4d589af47e8a9605f12db1
|
[
"Apache-2.0"
] | 1
|
2020-07-24T10:40:59.000Z
|
2020-07-24T10:40:59.000Z
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.volume.contrib import types_manage
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
from nova.volume import volume_types
def stub_volume_type(id):
specs = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
def return_volume_types_get_volume_type(context, id):
if id == "777":
raise exception.VolumeTypeNotFound(volume_type_id=id)
return stub_volume_type(int(id))
def return_volume_types_destroy(context, name):
if name == "777":
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
pass
def return_volume_types_create(context, name, specs):
pass
def return_volume_types_get_by_name(context, name):
if name == "777":
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
return stub_volume_type(int(name.split("_")[2]))
class VolumeTypesManageApiTest(test.TestCase):
def setUp(self):
super(VolumeTypesManageApiTest, self).setUp()
self.controller = types_manage.VolumeTypesManageController()
def test_volume_types_delete(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
self.stubs.Set(volume_types, 'destroy',
return_volume_types_destroy)
req = fakes.HTTPRequest.blank('/v1/fake/types/1')
self.controller._delete(req, 1)
def test_volume_types_delete_not_found(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
self.stubs.Set(volume_types, 'destroy',
return_volume_types_destroy)
req = fakes.HTTPRequest.blank('/v1/fake/types/777')
self.assertRaises(webob.exc.HTTPNotFound, self.controller._delete,
req, '777')
def test_create(self):
self.stubs.Set(volume_types, 'create',
return_volume_types_create)
self.stubs.Set(volume_types, 'get_volume_type_by_name',
return_volume_types_get_by_name)
body = {"volume_type": {"name": "vol_type_1",
"extra_specs": {"key1": "value1"}}}
req = fakes.HTTPRequest.blank('/v1/fake/types')
res_dict = self.controller._create(req, body)
self.assertEqual(1, len(res_dict))
self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
def test_create_empty_body(self):
self.stubs.Set(volume_types, 'create',
return_volume_types_create)
self.stubs.Set(volume_types, 'get_volume_type_by_name',
return_volume_types_get_by_name)
req = fakes.HTTPRequest.blank('/v1/fake/types')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller._create, req, '')
| 35.673077
| 78
| 0.660647
|
98ed75b5ff9741b58149e7d6e193b12ce85e9fb3
| 97,658
|
py
|
Python
|
conans/client/migrations_settings.py
|
a4z/conan
|
dec9e0288f81462c53b9222a206002fbc525ea65
|
[
"MIT"
] | null | null | null |
conans/client/migrations_settings.py
|
a4z/conan
|
dec9e0288f81462c53b9222a206002fbc525ea65
|
[
"MIT"
] | null | null | null |
conans/client/migrations_settings.py
|
a4z/conan
|
dec9e0288f81462c53b9222a206002fbc525ea65
|
[
"MIT"
] | null | null | null |
settings_1_9_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc64le, ppc64, armv6, armv7, armv7hf, armv8, sparc, sparcv9, mips, mips64, avr, armv7s, armv7k]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc64le, ppc64, armv6, armv7, armv7hf, armv8, sparc, sparcv9, mips, mips64, avr, armv7s, armv7k]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0"]
watchOS:
version: ["4.0"]
tvOS:
version: ["11.0"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc64le, ppc64, armv6, armv7, armv7hf, armv8, sparc, sparcv9, mips, mips64, avr, armv7s, armv7k]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""
settings_1_9_1 = settings_1_9_0
settings_1_9_2 = settings_1_9_1
settings_1_10_0 = settings_1_9_2
settings_1_10_1 = settings_1_10_0
settings_1_10_2 = settings_1_10_1
settings_1_11_0 = settings_1_10_2
settings_1_11_1 = settings_1_11_0
settings_1_11_2 = settings_1_11_1
settings_1_11_3 = settings_1_11_2
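# Releases whose default settings.yml did not change simply alias the previous
# version's string; a new literal only appears when the defaults differ
# (e.g. settings_1_12_0 below adds the ppc32, armv8_32 and armv8.3 architectures).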
settings_1_12_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""
settings_1_12_1 = settings_1_12_0
settings_1_12_2 = settings_1_12_1
settings_1_12_3 = settings_1_12_2
settings_1_12_4 = settings_1_12_3
settings_1_13_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""
settings_1_13_1 = settings_1_13_0
settings_1_13_2 = settings_1_13_1
settings_1_13_3 = settings_1_13_2
settings_1_13_4 = settings_1_13_3
settings_1_14_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""
settings_1_14_1 = settings_1_14_0
settings_1_14_2 = settings_1_14_1
settings_1_14_3 = settings_1_14_2
settings_1_14_4 = settings_1_14_3
settings_1_14_5 = settings_1_14_4
settings_1_14_6 = settings_1_14_5
settings_1_15_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
Arduino:
board: ANY
Emscripten:
arch: [x86, x86_64, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2",
"9"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_15_1 = settings_1_15_0
settings_1_15_2 = settings_1_15_1
settings_1_15_3 = settings_1_15_2
settings_1_15_4 = settings_1_15_3
settings_1_15_5 = settings_1_15_4
settings_1_16_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2", "8.3",
"9", "9.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_16_1 = settings_1_16_0
settings_1_17_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2", "8.3",
"9", "9.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_17_1 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2", "8.3",
"9", "9.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_17_2 = settings_1_17_1
settings_1_18_0 = settings_1_17_2
settings_1_18_1 = settings_1_18_0
settings_1_18_2 = settings_1_18_1
settings_1_18_3 = settings_1_18_2
settings_1_18_4 = settings_1_18_3
settings_1_18_5 = settings_1_18_4
settings_1_19_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_19_1 = settings_1_19_0
settings_1_19_2 = settings_1_19_1
settings_1_19_3 = settings_1_19_2
settings_1_20_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_20_1 = settings_1_20_0
settings_1_20_2 = settings_1_20_1
settings_1_20_3 = settings_1_20_2
settings_1_20_4 = settings_1_20_3
settings_1_20_5 = settings_1_20_4
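# From 1.21.0 onwards the default settings use YAML anchors (&gcc,
# &visual_studio) together with merge keys (<<:) so that the "intel"
# compiler's base sub-settings reuse the gcc / Visual Studio definitions.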
settings_1_21_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_21_1 = settings_1_21_0
settings_1_21_2 = settings_1_21_1
settings_1_21_3 = settings_1_21_2
settings_1_22_0 = settings_1_21_2
settings_1_22_1 = settings_1_22_0
settings_1_22_2 = settings_1_22_1
settings_1_22_3 = settings_1_22_2
settings_1_23_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_24_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_24_1 = settings_1_24_0
settings_1_25_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2", "9.3",
"10"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_25_1 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_25_2 = settings_1_25_1
settings_1_26_0 = settings_1_25_2
settings_1_26_1 = settings_1_26_0
settings_1_27_0 = settings_1_26_1
settings_1_27_1 = settings_1_27_0
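# 1.28.0 extends the Visual Studio toolsets with "llvm" and "ClangCL",
# allows a None libcxx plus an MSVC-style "runtime" sub-setting for clang,
# and adds qcc 8.3 together with its own compiler.cppstd values.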
settings_1_28_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_28_1 = settings_1_28_0
settings_1_28_2 = settings_1_28_1
settings_1_29_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_29_1 = settings_1_29_0
settings_1_29_2 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_30_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_30_1 = settings_1_30_0
settings_1_30_2 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_31_0 = settings_1_30_2
settings_1_31_1 = settings_1_31_0
settings_1_31_2 = settings_1_31_1
settings_1_31_3 = settings_1_31_2
settings_1_31_4 = settings_1_31_3
settings_1_32_0 = settings_1_31_4
settings_1_32_1 = settings_1_32_0
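# 1.33.0 introduces the new "msvc" compiler model (numeric versions,
# static/dynamic runtime and runtime_type), the "mcst-lcc" compiler, the
# Apple "sdk"/"subsystem" sub-settings and the e2k-v2..e2k-v7 architectures.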
settings_1_33_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0"]
sdk: [None, "macosx"]
subsystem: [None, "Catalyst"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_33_1 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_34_0 = settings_1_33_1
settings_1_35_0 = settings_1_34_0
| 47.337857
| 266
| 0.537355
|
f77ddc2d022929177bf186b19e8e9dcb867fe198
| 1,194
|
py
|
Python
|
src/flair_management/skin_manager/weight_editor.py
|
PxT00/valorant-skin-cli
|
4d142174e53769c10a06f2ba5343d8a4d4a7512c
|
[
"MIT"
] | 2
|
2021-08-11T02:09:15.000Z
|
2021-08-11T02:09:19.000Z
|
src/flair_management/skin_manager/weight_editor.py
|
PxT00/valorant-skin-cli
|
4d142174e53769c10a06f2ba5343d8a4d4a7512c
|
[
"MIT"
] | null | null | null |
src/flair_management/skin_manager/weight_editor.py
|
PxT00/valorant-skin-cli
|
4d142174e53769c10a06f2ba5343d8a4d4a7512c
|
[
"MIT"
] | null | null | null |
from InquirerPy import prompt, inquirer
from InquirerPy.separator import Separator
from ...flair_management.skin_manager.skin_manager import Skin_Manager
from .weapon_config_prompts import Prompts
class Weight_Editor:
@staticmethod
def weights_entrypoint():
weapon_data, skin_data, skin_choice, weapon_choice, weapon_skin_data = Prompts.select_weapon_type(change_all=False, weights=True)
while weapon_data is not None:
weapon_data['skins'][skin_choice]["weight"] = Weight_Editor.set_weight(weapon_skin_data)
Skin_Manager.modify_skin_data(skin_data)
weapon_data, skin_data, skin_choice, weapon_choice, weapon_skin_data = Prompts.select_skin(skin_data, weapon_choice, change_all=False, weights=True)
    @staticmethod
    def set_weight(skin_data):
current_weight = str(skin_data["weight"])
new_weight = inquirer.text(
message=f"[{skin_data['display_name']}] Selecione o peso para a aleatorização (peso atual {current_weight})",
default=current_weight,
validate=lambda result: result.isdigit(),
filter=lambda result: int(result)
).execute()
return new_weight
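# Minimal usage sketch (assuming the surrounding package's Prompts and
# Skin_Manager modules are importable; the call below is illustrative only):
#
#   Weight_Editor.weights_entrypoint()
#
# The loop above keeps asking for a new integer weight per selected skin and
# saving it via Skin_Manager.modify_skin_data() until the skin-selection prompt
# returns None for weapon_data.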
| 41.172414
| 160
| 0.718593
|
cede76755d5818aef7b0ecc3430634bd21bc459a
| 21,409
|
py
|
Python
|
arcpyext/mapping/_mapping2.py
|
dcworldwide/arcpyext
|
47ab401b74a2e296f7fc1eef51f80500d1300033
|
[
"BSD-3-Clause"
] | null | null | null |
arcpyext/mapping/_mapping2.py
|
dcworldwide/arcpyext
|
47ab401b74a2e296f7fc1eef51f80500d1300033
|
[
"BSD-3-Clause"
] | null | null | null |
arcpyext/mapping/_mapping2.py
|
dcworldwide/arcpyext
|
47ab401b74a2e296f7fc1eef51f80500d1300033
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
"""This module contains extended functionality for related to the arcpy.mapping module."""
# Python 2/3 compatibility
# pylint: disable=wildcard-import,unused-wildcard-import,wrong-import-order,wrong-import-position
from __future__ import (absolute_import, division, print_function, unicode_literals)
from future.builtins.disabled import *
from future.builtins import *
from future.standard_library import install_aliases
install_aliases()
from future.moves.collections import deque
from future.utils import viewitems
# pylint: enable=wildcard-import,unused-wildcard-import,wrong-import-order,wrong-import-position
# Standard lib imports
import ctypes
import logging
import os.path
import re
# Third-party imports
import arcpy
import olefile
# Local imports
from .. import _native as _ao
from ..exceptions import DataSourceUpdateError
def get_version(map_document):
"""Gets the version of a given map document (or path to a map document)."""
if isinstance(map_document, arcpy.mapping.MapDocument):
fp = map_document.filePath
else:
fp = map_document
with olefile.OleFileIO(fp) as o:
if o.exists("Version"):
with o.openstream("Version") as s:
return s.read().decode("utf-16").split("\x00")[1]
return None
def open_document(mxd):
"""Open a arcpy.mapping.MapDocument from a given path.
If the path is already a MapDocument, this is a no-op.
"""
import arcpy
if isinstance(mxd, arcpy.mapping.MapDocument):
return mxd
return arcpy.mapping.MapDocument(mxd)
def _change_data_source(layer, new_layer_source):
logger = _get_logger()
workspace_path = new_layer_source["workspacePath"]
dataset_name = new_layer_source.get("datasetName")
workspace_type = new_layer_source.get("workspaceType")
schema = new_layer_source.get("schema")
logger.debug("Workspace path: {}".format(workspace_path))
logger.debug("Dataset name: {}".format(dataset_name))
logger.debug("Workspace type: {}".format(workspace_type))
logger.debug("Schema: {}".format(schema))
try:
if ((not hasattr(layer, "supports") or layer.supports("workspacePath"))
and (dataset_name == None and workspace_type == None and schema == None)):
            # Tests if layer is actually a layer object (i.e. has a "supports" function) or table view (which doesn't,
# but always supports "workspacePath"). Can't test on type (arcpy.mapping.TableView) as that doesn't work
# on ArcPy 10.0
layer.findAndReplaceWorkspacePath("", workspace_path, validate=False)
return
kwargs = {"validate": False}
if dataset_name == None and hasattr(layer, "supports") and layer.supports("datasetName"):
if layer.supports("workspacePath"):
dataset_name = layer.dataSource.replace(layer.workspacePath, "")
else:
dataset_name = layer.datasetName
if dataset_name != None:
            # break apart dataset_name into its component parts
ds_user, ds_name, fc_user, fc_name = _parse_data_source(dataset_name)
if workspace_type == "FILEGDB_WORKSPACE":
# file GDB's don't have schema/users, so if switching to that type, remove schema (if still included)
dataset_name = fc_name
elif schema != None:
dataset_name = "{0}.{1}".format(schema, fc_name)
kwargs["dataset_name"] = dataset_name
if workspace_type != None:
kwargs["workspace_type"] = workspace_type
layer.replaceDataSource(workspace_path, **kwargs)
except Exception as e:
logger.exception("Exception raised by ArcPy")
raise DataSourceUpdateError("Exception raised internally by ArcPy", layer, e)
if hasattr(layer, "isBroken") and layer.isBroken:
raise DataSourceUpdateError("Layer is now broken.", layer)
def _get_data_source_desc(layer_or_table):
if hasattr(layer_or_table, "supports"):
if not layer_or_table.supports("DATASOURCE"):
return None
return layer_or_table.dataSource
def _get_logger():
return logging.getLogger("arcpyext.mapping")
def _get_spatial_ref(code):
return arcpy.SpatialReference(code)
def _list_layers(map_document, data_frame):
return arcpy.mapping.ListLayers(map_document, None, data_frame)
def _list_maps(map_document):
return arcpy.mapping.ListDataFrames(map_document)
def _list_tables(map_document, data_frame):
return arcpy.mapping.ListTableViews(map_document, None, data_frame)
def _native_add_data_connection_details(idataset, layer_details):
import ESRI.ArcGIS.Geodatabase as esriGeoDatabase
import ESRI.ArcGIS.esriSystem as esriSystem
if bool(idataset):
# can enrich with database details
workspace = _ao.cast_obj(idataset.Workspace, esriGeoDatabase.IWorkspace)
property_set = _ao.cast_obj(workspace.ConnectionProperties, esriSystem.IPropertySet)
_, property_keys, property_values = property_set.GetAllProperties(None, None)
connection_properties = dict(zip(property_keys, property_values))
layer_details["userName"] = connection_properties.get("USER")
layer_details["server"] = connection_properties.get("SERVER")
layer_details["service"] = connection_properties.get("INSTANCE")
layer_details["database"] = connection_properties.get("DATABASE")
# TODO: Implement details for web service layer
def _native_describe_fields(layer_or_table_fields):
import ESRI.ArcGIS.Geodatabase as esriGeoDatabase
def field_type_id_to_name(f_type_id):
field_types = [
"SmallInteger", "Integer", "Single", "Double", "String", "Date", "OID", "Geometry", "Blob", "Raster",
"Guid", "GlobalID", "Xml"
]
if f_type_id >= 0 and f_type_id < len(field_types):
return field_types[f_type_id]
return None
if not layer_or_table_fields:
return None
fields = [
{
"field": _ao.cast_obj(layer_or_table_fields.get_Field(i), esriGeoDatabase.IField2),
"fieldInfo": _ao.cast_obj(layer_or_table_fields.get_FieldInfo(i), esriGeoDatabase.IFieldInfo),
"index": i
} for i in range(0, layer_or_table_fields.FieldCount)
]
return [
{
"alias": f["fieldInfo"].Alias,
"index": f["index"],
"name": f["field"].Name,
"type": field_type_id_to_name(f["field"].Type),
"visible": f["fieldInfo"].Visible
} for f in fields
]
def _native_describe_layer(layer_parts):
# avoid boxing/unboxing issues
# not sure if this is really necessary, but getting weird results without it
layer_is_valid = layer_parts["layer"].Valid
layer_is_visible = layer_parts["layer"].Visible
layer_name = layer_parts["layer"].Name
layer_details = {
"dataSource": _native_get_data_source(layer_parts),
"database": None,
"datasetName": _native_get_dataset_name(layer_parts["dataset"]),
"datasetType": _native_get_dataset_type(layer_parts["dataset"]),
"definitionQuery": _native_get_definition_query(layer_parts["featureLayerDefinition"]),
"fields": _native_describe_fields(layer_parts["layerFields"]),
"index": layer_parts["index"],
"isBroken": not layer_is_valid,
"isFeatureLayer": bool(layer_parts["featureLayer"]),
"isNetworkAnalystLayer": bool(layer_parts["networkAnalystLayer"]),
"isRasterLayer": bool(layer_parts["rasterLayer"]),
"isRasterizingLayer": None, # not implemented yet
"isServiceLayer": None, # not implemented yet
"isGroupLayer": not layer_parts["groupLayer"] == None,
"longName": "\\".join(_native_get_layer_name_parts(layer_parts)),
"name": layer_name,
"server": None,
"service": None,
"serviceId": _native_get_service_layer_property_value(layer_parts["serverLayerExtensions"], "ServiceLayerID"),
"userName": None,
"visible": layer_is_visible
}
_native_add_data_connection_details(layer_parts["dataset"], layer_details)
return layer_details
def _native_describe_map(map_document, map_frame):
# make the map frame active before getting details about it.
_native_make_map_frame_active_view(map_frame)
return {
"name": map_frame.Name,
"spatialReference": _get_spatial_ref(_native_get_map_spatial_ref_code(map_document, map_frame)),
"layers": [_native_describe_layer(l) for l in _native_list_layers(map_document, map_frame)],
"tables": [_native_describe_table(t) for t in _native_list_tables(map_document, map_frame)]
}
def _native_describe_table(table_parts):
table_details = {
"dataSource": _native_get_data_source(table_parts),
"database": None,
"datasetName": _native_get_dataset_name(table_parts["tableDataset"]),
"datasetType": _native_get_dataset_type(table_parts["tableDataset"]),
"definitionQuery": _native_get_definition_query(table_parts["standaloneTableDefinition"]),
"fields": _native_describe_fields(table_parts["standaloneTableFields"]),
"name": table_parts["standaloneTable"].Name,
"index": table_parts["index"],
"isBroken": not table_parts["standaloneTable"].Valid,
"server": None,
"service": None,
"serviceId": _native_get_service_layer_property_value(table_parts["serverLayerExtensions"], "ServiceTableID"),
"userName": None
}
_native_add_data_connection_details(table_parts["tableDataset"], table_details)
return table_details
def _native_get_data_source(layer_or_table):
"""Attempts to get the path to the data source for a given layer or table."""
import ESRI.ArcGIS.Geodatabase as esriGeoDatabase
path = None
if layer_or_table.get("featureLayer"):
# input is a feature layer
if layer_or_table["featureLayer"].FeatureClass:
feature_class_name = layer_or_table["dataset"].Name
workspace = _ao.cast_obj(layer_or_table["dataset"].Workspace, esriGeoDatabase.IWorkspace)
workspace_path = workspace.PathName
feature_class = _ao.cast_obj(layer_or_table["featureLayer"].FeatureClass, esriGeoDatabase.IFeatureClass)
# Test if feature dataset in use, NULL COM pointers return falsey
if feature_class.FeatureDataset:
feature_dataset = _ao.cast_obj(feature_class.FeatureDataset, esriGeoDatabase.IFeatureDataset)
feature_dataset_name = feature_dataset.Name
path = os.path.join(workspace_path, feature_dataset_name, feature_class_name)
else:
path = os.path.join(workspace_path, feature_class_name)
elif layer_or_table.get("tableDataset"):
# input is a standalone table
table_name = layer_or_table["tableDataset"].Name
workspace = _ao.cast_obj(layer_or_table["tableDataset"].Workspace, esriGeoDatabase.IWorkspace)
workspace_path = workspace.PathName
path = os.path.join(workspace_path, table_name)
return path
def _native_get_dataset_name(idataset):
dataset_name = None
if idataset:
dataset_name = idataset.Name
return dataset_name
def _native_get_dataset_type(idataset):
dataset_type = None
if idataset:
dataset_type = idataset.Category
return dataset_type
def _native_get_definition_query(feature_layer_or_table_definition):
definition_query = None
if feature_layer_or_table_definition:
definition_query = feature_layer_or_table_definition.DefinitionExpression
return definition_query
def _native_get_layer_name_parts(layer):
name_parts = deque()
def get_parent_layer_name(child_layer):
# add to name parts
name_parts.appendleft(child_layer["layer"].Name)
if child_layer["parent"]:
get_parent_layer_name(child_layer["parent"])
get_parent_layer_name(layer)
return name_parts
def _native_get_service_layer_property_value(service_layer_extensions, property_key):
# flatten layer server extensions into a list of server property dictionaries
# ServerProperties.GetAllProperties() returns two lists, names and values, so zip them and turn them into a dictionary
import ESRI.ArcGIS.esriSystem as esriSystem
layer_extensions_server_properties = []
for sle in service_layer_extensions:
if sle == None:
continue
property_set = _ao.cast_obj(sle.ServerProperties, esriSystem.IPropertySet)
_, property_keys, property_values = property_set.GetAllProperties(None, None)
if len(property_keys) > 0:
properties = dict(zip(property_keys, property_values))
layer_extensions_server_properties.append(properties)
# find service layer ID, if it exists
# value may be returned non-unique, this will be checked further up the stack
service_layer_id = None
for props in layer_extensions_server_properties:
for key, value in viewitems(props):
if key == property_key:
return value
return service_layer_id
def _native_list_layers(map_document, map_frame):
"""Recursively iterates through a map frame to get all layers, building up parent relationships as it goes."""
# get the ArcObjects types we need
import ESRI.ArcGIS.Geodatabase as esriGeoDatabase
import ESRI.ArcGIS.Carto as esriCarto
import ESRI.ArcGIS.NetworkAnalyst as esriNetworkAnalyst
# list of all layers that we'll be returning
layers = []
def build_layer_parts(map_layer):
layer_parts = {
"children": [],
"dataset": None,
"layer": _ao.cast_obj(map_layer, esriCarto.ILayer2),
"layerFields": _ao.cast_obj(map_layer, esriCarto.ILayerFields),
"featureLayer": _ao.cast_obj(map_layer, esriCarto.IFeatureLayer),
"featureLayerDefinition": _ao.cast_obj(map_layer, esriCarto.IFeatureLayerDefinition2),
"groupLayer": _ao.cast_obj(map_layer, esriCarto.IGroupLayer),
"index": len(layers), # map index will be the same as the current length of this array
"networkAnalystLayer": _ao.cast_obj(map_layer, esriNetworkAnalyst.INALayer),
"parent": None,
"rasterLayer": _ao.cast_obj(map_layer, esriCarto.IRasterLayer),
"serverLayerExtensions": None
}
if bool(layer_parts["featureLayer"]):
layer_parts["dataset"] = _ao.cast_obj(layer_parts["featureLayer"].FeatureClass, esriGeoDatabase.IDataset)
# Get server layer extensions
layer_extensions = _ao.cast_obj(map_layer, esriCarto.ILayerExtensions)
layer_parts["serverLayerExtensions"] = [
sle for sle in (_ao.cast_obj(layer_extensions.get_Extension(i), esriCarto.IServerLayerExtension)
for i in range(0, layer_extensions.get_ExtensionCount())) if sle is not None
]
layers.append(layer_parts)
return layer_parts
def get_child_layers(layer_parts, parent_parts):
# Set parent
layer_parts["parent"] = parent_parts
if not bool(layer_parts["groupLayer"]):
# layer is not a group layer, ignore
return
# layer is a group layer, cast to ICompositeLayer to get access to child layers
composite_layer = _ao.cast_obj(layer_parts["layer"], esriCarto.ICompositeLayer)
for i in range(0, composite_layer.Count):
# get child layer
child_layer = _ao.cast_obj(composite_layer.get_Layer(i), esriCarto.ILayer2)
# get child layer parts
child_layer_parts = build_layer_parts(child_layer)
# add child_layer_parts to the list of children for the current layer
layer_parts["children"].append(child_layer_parts)
# recursively find children
get_child_layers(child_layer_parts, layer_parts)
# iterate through the top level of layers
map_layer_iterator = map_frame.get_Layers(None, False)
map_layer_iterator = _ao.cast_obj(map_layer_iterator, esriCarto.IEnumLayer)
map_layer = map_layer_iterator.Next()
while (map_layer):
layer_parts = build_layer_parts(map_layer)
get_child_layers(layer_parts, None)
map_layer = map_layer_iterator.Next()
return layers
def _native_list_maps(map_document):
"""Gets a list of IMaps (Data Frames) from the provided map document."""
# get the ArcObjects types we need
import ESRI.ArcGIS.Carto as esriCarto
# make sure map document is a map document
map_document = _ao.cast_obj(map_document, esriCarto.IMapDocument)
# iterate the list of maps, casting each one to IMap
return [_ao.cast_obj(map_document.get_Map(i), esriCarto.IMap) for i in range(0, map_document.MapCount)]
def _native_list_tables(map_document, map_frame):
"""Iterates through a map frame to get all tables."""
# get the ArcObjects types we need
import ESRI.ArcGIS.Carto as esriCarto
import ESRI.ArcGIS.Geodatabase as esriGeoDatabase
# list of all tables
tables = []
def build_table_parts(standalone_table):
table_parts = {
"index": len(tables), # map index will be the same as the current length of this array
"standaloneTable": standalone_table,
"standaloneTableDataset": _ao.cast_obj(standalone_table, esriGeoDatabase.IDataset),
"standaloneTableDefinition": _ao.cast_obj(standalone_table, esriCarto.ITableDefinition),
"standaloneTableFields": _ao.cast_obj(standalone_table, esriGeoDatabase.ITableFields),
"table": _ao.cast_obj(standalone_table.Table, esriGeoDatabase.ITable),
"tableDataset": _ao.cast_obj(standalone_table.Table, esriGeoDatabase.IDataset),
"serverLayerExtensions": None
}
# Get server layer extensions
table_extensions = _ao.cast_obj(standalone_table, esriCarto.ITableExtensions)
table_parts["serverLayerExtensions"] = [
sle for sle in (_ao.cast_obj(table_extensions.get_Extension(i), esriCarto.IServerLayerExtension)
for i in range(0, table_extensions.get_ExtensionCount())) if sle is not None
]
tables.append(table_parts)
return table_parts
# cast map to a standalone table collection to get access to tables
table_collection = _ao.cast_obj(map_frame, esriCarto.IStandaloneTableCollection)
# iterate the table collection
for i in range(0, table_collection.StandaloneTableCount):
table = _ao.cast_obj(table_collection.get_StandaloneTable(i), esriCarto.IStandaloneTable)
build_table_parts(table)
return tables
def _native_get_map_spatial_ref_code(map_document, map_frame):
import ESRI.ArcGIS.Geometry as esriGeometry
return _ao.cast_obj(map_frame.SpatialReference, esriGeometry.ISpatialReference).FactoryCode
def _native_mxd_exists(mxd_path):
import ESRI.ArcGIS.Carto as esriCarto
map_document = _ao.create_obj(esriCarto.MapDocument, esriCarto.IMapDocument)
exists = map_document.get_IsPresent(mxd_path)
valid = map_document.get_IsMapDocument(mxd_path)
return exists and valid
def _native_document_close(map_document):
import ESRI.ArcGIS.Carto as esriCarto
# Make sure it's a map document
map_document = _ao.cast_obj(map_document, esriCarto.IMapDocument)
map_document.Close()
def _native_document_open(mxd_path):
#import comtypes.gen.esriCarto as esriCarto
import ESRI.ArcGIS.Carto as esriCarto
if _native_mxd_exists(mxd_path):
map_document = _ao.create_obj(esriCarto.MapDocument, esriCarto.IMapDocument)
map_document.Open(mxd_path)
# Maps must be activated in order for all properties to be initialized correctly
maps = _native_list_maps(map_document)
for m in maps:
_native_make_map_frame_active_view(m)
return map_document
else:
raise ValueError("MXD path '{}' not found or document invalid.".format(str(mxd_path)))
def _native_make_map_frame_active_view(map_frame):
import ESRI.ArcGIS.Carto as esriCarto
window_handle = ctypes.windll.user32.GetDesktopWindow()
# cast map frame to active view
active_view = _ao.cast_obj(map_frame, esriCarto.IActiveView)
# make it the active view
active_view.Activate(window_handle)
def _parse_data_source(data_source):
"""Takes a string describing a data source and returns a four-part tuple describing the dataset username, dataset
name, feature class username and feature class name"""
dataset_regex = re.compile(
r"^(?:\\)?(?P<ds_user>[\w]*?)(?:\.)?(?P<ds_name>[\w]*?(?=\\))(?:\\)?(?P<fc_user>[\w]*?(?=\.))(?:\.)(?P<fc_name>[\w]*?)$",
re.IGNORECASE)
r = dataset_regex.search(data_source)
if r == None:
feature_class_regex = re.compile(r"^(?:\\)?(?P<fc_user>[\w]*?(?=\.))(?:\.)(?P<fc_name>[\w]*?)$", re.IGNORECASE)
r = feature_class_regex.search(data_source)
if r == None:
return (None, None, None, data_source)
r = r.groupdict()
return (r.get("ds_user"), r.get("ds_name"), r.get("fc_user"), r.get("fc_name"))
| 36.97582
| 129
| 0.696343
|
c26ea917df4659652504d2f36f9a1088a804a50f
| 716
|
py
|
Python
|
src/main/resources/assets/openpython/opos/v1.1/lib/micropython/shutil.py
|
fossabot/OpenPython
|
8fe3f794f2a6c543d96c1ef5c097ffa18f90b680
|
[
"PSF-2.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 41
|
2018-10-25T06:15:31.000Z
|
2022-02-20T11:20:43.000Z
|
src/main/resources/assets/openpython/opos/v1.1/lib/micropython/shutil.py
|
fossabot/OpenPython
|
8fe3f794f2a6c543d96c1ef5c097ffa18f90b680
|
[
"PSF-2.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 16
|
2018-03-20T12:25:27.000Z
|
2018-03-25T13:34:44.000Z
|
src/main/resources/assets/openpython/opos/v1.1/lib/micropython/shutil.py
|
fossabot/OpenPython
|
8fe3f794f2a6c543d96c1ef5c097ffa18f90b680
|
[
"PSF-2.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 9
|
2020-11-12T10:23:27.000Z
|
2021-04-18T14:46:24.000Z
|
# Reimplement, because CPython3.3 impl is rather bloated
import os
def rmtree(top):
for path, dirs, files in os.walk(top, False):
for f in files:
os.unlink(path + "/" + f)
os.rmdir(path)
def copyfileobj(src, dest, length=512):
if hasattr(src, "readinto"):
buf = bytearray(length)
while True:
sz = src.readinto(buf)
if not sz:
break
if sz == length:
dest.write(buf)
else:
b = memoryview(buf)[:sz]
dest.write(b)
else:
while True:
buf = src.read(length)
if not buf:
break
dest.write(buf)
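# Minimal usage sketch (the file and directory names below are placeholders):
#
#   with open("src.bin", "rb") as src, open("dst.bin", "wb") as dest:
#       copyfileobj(src, dest)   # streams in 512-byte chunks, using readinto when available
#
#   rmtree("build")              # walks bottom-up, unlinking files then removing each dir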
| 24.689655
| 56
| 0.480447
|
cdf2be01408bcec466128ec45a0099de8db7e66a
| 12,658
|
py
|
Python
|
examples/glut_ex/atoms.py
|
bpedersen2/python-gr
|
664f83d11c5d710aa27e117cc1c2899fbd5582a7
|
[
"RSA-MD"
] | 25
|
2018-02-23T18:11:51.000Z
|
2021-02-16T10:06:47.000Z
|
examples/glut_ex/atoms.py
|
bpedersen2/python-gr
|
664f83d11c5d710aa27e117cc1c2899fbd5582a7
|
[
"RSA-MD"
] | 29
|
2018-01-24T16:40:53.000Z
|
2022-02-21T15:29:48.000Z
|
examples/glut_ex/atoms.py
|
bpedersen2/python-gr
|
664f83d11c5d710aa27e117cc1c2899fbd5582a7
|
[
"RSA-MD"
] | 8
|
2017-11-27T10:28:56.000Z
|
2021-11-03T13:10:12.000Z
|
# -*- coding: utf-8 -*-
"""
This module includes information about elements:
- atom_name_list maps element number to name
- atom_color_list maps element number to rgb color tuple
- atomic_number_dict maps element symbol to element number (not a 1:1, but an n:1 mapping, see Ununbium (UUB) / Copernicium (CN))
- atom_radius_list maps element number to covalent radius in mÅ
"""
atom_name_list = [
None,
"Hydrogen",
"Helium",
"Lithium",
"Beryllium",
"Boron",
"Carbon",
"Nitrogen",
"Oxygen",
"Fluorine",
"Neon",
"Sodium",
"Magnesium",
"Aluminium",
"Silicon",
"Phosphorus",
"Sulfur",
"Chlorine",
"Argon",
"Potassium",
"Calcium",
"Scandium",
"Titanium",
"Vanadium",
"Chromium",
"Manganese",
"Iron",
"Cobalt",
"Nickel",
"Copper",
"Zinc",
"Gallium",
"Germanium",
"Arsenic",
"Selenium",
"Bromine",
"Krypton",
"Rubidium",
"Strontium",
"Yttrium",
"Zirconium",
"Niobium",
"Molybdenum",
"Technetium",
"Ruthenium",
"Rhodium",
"Palladium",
"Silver",
"Cadmium",
"Indium",
"Tin",
"Antimony",
"Tellurium",
"Iodine",
"Xenon",
"Caesium",
"Barium",
"Lanthanum",
"Cerium",
"Praseodymium",
"Neodymium",
"Promethium",
"Samarium",
"Europium",
"Gadolinium",
"Terbium",
"Dysprosium",
"Holmium",
"Erbium",
"Thulium",
"Ytterbium",
"Lutetium",
"Hafnium",
"Tantalum",
"Tungsten",
"Rhenium",
"Osmium",
"Iridium",
"Platinum",
"Gold",
"Mercury",
"Thallium",
"Lead",
"Bismuth",
"Polonium",
"Astatine",
"Radon",
"Francium",
"Radium",
"Actinium",
"Thorium",
"Protactinium",
"Uranium",
"Neptunium",
"Plutonium",
"Americium",
"Curium",
"Berkelium",
"Californium",
"Einsteinium",
"Fermium",
"Mendelevium",
"Nobelium",
"Lawrencium",
"Rutherfordium",
"Dubnium",
"Seaborgium",
"Bohrium",
"Hassium",
"Meitnerium",
"Darmstadtium",
"Roentgenium",
"Copernicium",
"Ununtrium",
"Ununquadium",
"Ununpentium",
"Ununhexium",
"Ununseptium",
"Ununoctium"
]
atom_color_list = [None] * 119
atomic_number_dict = {}
atomic_number_dict["H"] = 1
atom_color_list[1] = (255, 255, 255)
atomic_number_dict["HE"] = 2
atom_color_list[2] = (217, 255, 255)
atomic_number_dict["LI"] = 3
atom_color_list[3] = (204, 128, 255)
atomic_number_dict["BE"] = 4
atom_color_list[4] = (194, 255, 0)
atomic_number_dict["B"] = 5
atom_color_list[5] = (255, 181, 181)
atomic_number_dict["C"] = 6
atom_color_list[6] = (144, 144, 144)
atomic_number_dict["N"] = 7
atom_color_list[7] = (48, 80, 248)
atomic_number_dict["O"] = 8
atom_color_list[8] = (255, 13, 13)
atomic_number_dict["F"] = 9
atom_color_list[9] = (144, 224, 80)
atomic_number_dict["NE"] = 10
atom_color_list[10] = (179, 227, 245)
atomic_number_dict["NA"] = 11
atom_color_list[11] = (171, 92, 242)
atomic_number_dict["MG"] = 12
atom_color_list[12] = (138, 255, 0)
atomic_number_dict["AL"] = 13
atom_color_list[13] = (191, 166, 166)
atomic_number_dict["SI"] = 14
atom_color_list[14] = (240, 200, 160)
atomic_number_dict["P"] = 15
atom_color_list[15] = (255, 128, 0)
atomic_number_dict["S"] = 16
atom_color_list[16] = (255, 255, 48)
atomic_number_dict["CL"] = 17
atom_color_list[17] = (31, 240, 31)
atomic_number_dict["AR"] = 18
atom_color_list[18] = (128, 209, 227)
atomic_number_dict["K"] = 19
atom_color_list[19] = (143, 64, 212)
atomic_number_dict["CA"] = 20
atom_color_list[20] = (61, 225, 0)
atomic_number_dict["SC"] = 21
atom_color_list[21] = (230, 230, 230)
atomic_number_dict["TI"] = 22
atom_color_list[22] = (191, 194, 199)
atomic_number_dict["V"] = 23
atom_color_list[23] = (166, 166, 171)
atomic_number_dict["CR"] = 24
atom_color_list[24] = (138, 153, 199)
atomic_number_dict["MN"] = 25
atom_color_list[25] = (156, 122, 199)
atomic_number_dict["FE"] = 26
atom_color_list[26] = (224, 102, 51)
atomic_number_dict["CO"] = 27
atom_color_list[27] = (240, 144, 160)
atomic_number_dict["NI"] = 28
atom_color_list[28] = (80, 208, 80)
atomic_number_dict["CU"] = 29
atom_color_list[29] = (200, 128, 51)
atomic_number_dict["ZN"] = 30
atom_color_list[30] = (125, 128, 176)
atomic_number_dict["GA"] = 31
atom_color_list[31] = (194, 143, 143)
atomic_number_dict["GE"] = 32
atom_color_list[32] = (102, 143, 143)
atomic_number_dict["AS"] = 33
atom_color_list[33] = (189, 128, 227)
atomic_number_dict["SE"] = 34
atom_color_list[34] = (225, 161, 0)
atomic_number_dict["BR"] = 35
atom_color_list[35] = (166, 41, 41)
atomic_number_dict["KR"] = 36
atom_color_list[36] = (92, 184, 209)
atomic_number_dict["RB"] = 37
atom_color_list[37] = (112, 46, 176)
atomic_number_dict["SR"] = 38
atom_color_list[38] = (0, 255, 0)
atomic_number_dict["Y"] = 39
atom_color_list[39] = (148, 255, 255)
atomic_number_dict["ZR"] = 40
atom_color_list[40] = (148, 224, 224)
atomic_number_dict["NB"] = 41
atom_color_list[41] = (115, 194, 201)
atomic_number_dict["MO"] = 42
atom_color_list[42] = (84, 181, 181)
atomic_number_dict["TC"] = 43
atom_color_list[43] = (59, 158, 158)
atomic_number_dict["RU"] = 44
atom_color_list[44] = (36, 143, 143)
atomic_number_dict["RH"] = 45
atom_color_list[45] = (10, 125, 140)
atomic_number_dict["PD"] = 46
atom_color_list[46] = (0, 105, 133)
atomic_number_dict["AG"] = 47
atom_color_list[47] = (192, 192, 192)
atomic_number_dict["CD"] = 48
atom_color_list[48] = (255, 217, 143)
atomic_number_dict["IN"] = 49
atom_color_list[49] = (166, 117, 115)
atomic_number_dict["SN"] = 50
atom_color_list[50] = (102, 128, 128)
atomic_number_dict["SB"] = 51
atom_color_list[51] = (158, 99, 181)
atomic_number_dict["TE"] = 52
atom_color_list[52] = (212, 122, 0)
atomic_number_dict["I"] = 53
atom_color_list[53] = (148, 0, 148)
atomic_number_dict["XE"] = 54
atom_color_list[54] = (66, 158, 176)
atomic_number_dict["CS"] = 55
atom_color_list[55] = (87, 23, 143)
atomic_number_dict["BA"] = 56
atom_color_list[56] = (0, 201, 0)
atomic_number_dict["LA"] = 57
atom_color_list[57] = (112, 212, 255)
atomic_number_dict["CE"] = 58
atom_color_list[58] = (255, 255, 199)
atomic_number_dict["PR"] = 59
atom_color_list[59] = (217, 225, 199)
atomic_number_dict["ND"] = 60
atom_color_list[60] = (199, 225, 199)
atomic_number_dict["PM"] = 61
atom_color_list[61] = (163, 225, 199)
atomic_number_dict["SM"] = 62
atom_color_list[62] = (143, 225, 199)
atomic_number_dict["EU"] = 63
atom_color_list[63] = (97, 225, 199)
atomic_number_dict["GD"] = 64
atom_color_list[64] = (69, 225, 199)
atomic_number_dict["TB"] = 65
atom_color_list[65] = (48, 225, 199)
atomic_number_dict["DY"] = 66
atom_color_list[66] = (31, 225, 199)
atomic_number_dict["HO"] = 67
atom_color_list[67] = (0, 225, 156)
atomic_number_dict["ER"] = 68
atom_color_list[68] = (0, 230, 117)
atomic_number_dict["TM"] = 69
atom_color_list[69] = (0, 212, 82)
atomic_number_dict["YB"] = 70
atom_color_list[70] = (0, 191, 56)
atomic_number_dict["LU"] = 71
atom_color_list[71] = (0, 171, 36)
atomic_number_dict["HF"] = 72
atom_color_list[72] = (77, 194, 255)
atomic_number_dict["TA"] = 73
atom_color_list[73] = (77, 166, 255)
atomic_number_dict["W"] = 74
atom_color_list[74] = (33, 148, 214)
atomic_number_dict["RE"] = 75
atom_color_list[75] = (38, 125, 171)
atomic_number_dict["OS"] = 76
atom_color_list[76] = (38, 102, 150)
atomic_number_dict["IR"] = 77
atom_color_list[77] = (23, 84, 135)
atomic_number_dict["PT"] = 78
atom_color_list[78] = (208, 208, 224)
atomic_number_dict["AU"] = 79
atom_color_list[79] = (255, 209, 35)
atomic_number_dict["HG"] = 80
atom_color_list[80] = (184, 184, 208)
atomic_number_dict["TL"] = 81
atom_color_list[81] = (166, 84, 77)
atomic_number_dict["PB"] = 82
atom_color_list[82] = (87, 89, 97)
atomic_number_dict["BI"] = 83
atom_color_list[83] = (158, 79, 181)
atomic_number_dict["PO"] = 84
atom_color_list[84] = (171, 92, 0)
atomic_number_dict["AT"] = 85
atom_color_list[85] = (117, 79, 69)
atomic_number_dict["RN"] = 86
atom_color_list[86] = (66, 130, 150)
atomic_number_dict["FR"] = 87
atom_color_list[87] = (66, 0, 102)
atomic_number_dict["RA"] = 88
atom_color_list[88] = (0, 125, 0)
atomic_number_dict["AC"] = 89
atom_color_list[89] = (112, 171, 250)
atomic_number_dict["TH"] = 90
atom_color_list[90] = (0, 186, 255)
atomic_number_dict["PA"] = 91
atom_color_list[91] = (0, 161, 255)
atomic_number_dict["U"] = 92
atom_color_list[92] = (0, 143, 255)
atomic_number_dict["NP"] = 93
atom_color_list[93] = (0, 128, 255)
atomic_number_dict["PU"] = 94
atom_color_list[94] = (0, 107, 255)
atomic_number_dict["AM"] = 95
atom_color_list[95] = (84, 92, 242)
atomic_number_dict["CM"] = 96
atom_color_list[96] = (120, 92, 227)
atomic_number_dict["BK"] = 97
atom_color_list[97] = (138, 79, 227)
atomic_number_dict["CF"] = 98
atom_color_list[98] = (161, 54, 212)
atomic_number_dict["ES"] = 99
atom_color_list[99] = (179, 31, 212)
atomic_number_dict["FM"] = 100
atom_color_list[100] = (179, 31, 186)
atomic_number_dict["MD"] = 101
atom_color_list[101] = (179, 13, 166)
atomic_number_dict["NO"] = 102
atom_color_list[102] = (189, 13, 135)
atomic_number_dict["LR"] = 103
atom_color_list[103] = (199, 0, 102)
atomic_number_dict["RF"] = 104
atom_color_list[104] = (204, 0, 89)
atomic_number_dict["DB"] = 105
atom_color_list[105] = (209, 0, 79)
atomic_number_dict["SG"] = 106
atom_color_list[106] = (217, 0, 69)
atomic_number_dict["BH"] = 107
atom_color_list[107] = (224, 0, 56)
atomic_number_dict["HS"] = 108
atom_color_list[108] = (230, 0, 46)
atomic_number_dict["MT"] = 109
atom_color_list[109] = (235, 0, 38)
atomic_number_dict["DS"] = 110
atom_color_list[110] = (255, 0, 255)
atomic_number_dict["RG"] = 111
atom_color_list[111] = (255, 0, 255)
atomic_number_dict["CN"] = 112
atomic_number_dict["UUB"] = 112
atom_color_list[112] = (255, 0, 255)
atomic_number_dict["UUT"] = 113
atom_color_list[113] = (255, 0, 255)
atomic_number_dict["UUQ"] = 114
atom_color_list[114] = (255, 0, 255)
atomic_number_dict["UUP"] = 115
atom_color_list[115] = (255, 0, 255)
atomic_number_dict["UUH"] = 116
atom_color_list[116] = (255, 0, 255)
atomic_number_dict["UUS"] = 117
atom_color_list[117] = (255, 0, 255)
atomic_number_dict["UUO"] = 118
atom_color_list[118] = (255, 0, 255)
# Source: Jmol constants, units: mÅ
atom_radius_list = [
None,
230, # 1 H
930, # 2 He
680, # 3 Li
350, # 4 Be
830, # 5 B
680, # 6 C
680, # 7 N
680, # 8 O
640, # 9 F
1120, # 10 Ne
970, # 11 Na
1100, # 12 Mg
1350, # 13 Al
1200, # 14 Si
750, # 15 P
1020, # 16 S
990, # 17 Cl
1570, # 18 Ar
1330, # 19 K
990, # 20 Ca
1440, # 21 Sc
1470, # 22 Ti
1330, # 23 V
1350, # 24 Cr
1350, # 25 Mn
1340, # 26 Fe
1330, # 27 Co
1500, # 28 Ni
1520, # 29 Cu
1450, # 30 Zn
1220, # 31 Ga
1170, # 32 Ge
1210, # 33 As
1220, # 34 Se
1210, # 35 Br
1910, # 36 Kr
1470, # 37 Rb
1120, # 38 Sr
1780, # 39 Y
1560, # 40 Zr
1480, # 41 Nb
1470, # 42 Mo
1350, # 43 Tc
1400, # 44 Ru
1450, # 45 Rh
1500, # 46 Pd
1590, # 47 Ag
1690, # 48 Cd
1630, # 49 In
1460, # 50 Sn
1460, # 51 Sb
1470, # 52 Te
1400, # 53 I
1980, # 54 Xe
1670, # 55 Cs
1340, # 56 Ba
1870, # 57 La
1830, # 58 Ce
1820, # 59 Pr
1810, # 60 Nd
1800, # 61 Pm
1800, # 62 Sm
1990, # 63 Eu
1790, # 64 Gd
1760, # 65 Tb
1750, # 66 Dy
1740, # 67 Ho
1730, # 68 Er
1720, # 69 Tm
1940, # 70 Yb
1720, # 71 Lu
1570, # 72 Hf
1430, # 73 Ta
1370, # 74 W
1350, # 75 Re
1370, # 76 Os
1320, # 77 Ir
1500, # 78 Pt
1500, # 79 Au
1700, # 80 Hg
1550, # 81 Tl
1540, # 82 Pb
1540, # 83 Bi
1680, # 84 Po
1700, # 85 At
2400, # 86 Rn
2000, # 87 Fr
1900, # 88 Ra
1880, # 89 Ac
1790, # 90 Th
1610, # 91 Pa
1580, # 92 U
1550, # 93 Np
1530, # 94 Pu
1510, # 95 Am
1500, # 96 Cm
1500, # 97 Bk
1500, # 98 Cf
1500, # 99 Es
1500, # 100 Fm
1500, # 101 Md
1500, # 102 No
1500, # 103 Lr
1600, # 104 Rf
1600, # 105 Db
1600, # 106 Sg
1600, # 107 Bh
1600, # 108 Hs
1600, # 109 Mt
1600, # 110 DS
1600, # 111 RG
1600, # 112 CN
1600, # 113 UUT
1600, # 114 UUQ
1600, # 115 UUP
1600, # 116 UUH
1600, # 117 UUS
1600, # 118 UUO
]
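# Hedged usage sketch tying the four tables together (iron is just an example):
#
#   number = atomic_number_dict["FE"]       # 26
#   name = atom_name_list[number]           # "Iron"
#   r, g, b = atom_color_list[number]       # (224, 102, 51)
#   radius = atom_radius_list[number]       # 1340 (covalent radius in mÅ)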
| 25.623482
| 124
| 0.616053
|
bdbccf005b5be986093a453c76c29f40cba6dc9d
| 640
|
py
|
Python
|
tests/settings.py
|
adam-grandt-tts/data-driven-acquisition
|
2f970a2815f90f591203c02c9099642e4cbd24d8
|
[
"CC0-1.0"
] | 1
|
2020-02-14T17:36:27.000Z
|
2020-02-14T17:36:27.000Z
|
tests/settings.py
|
adam-grandt-tts/data-driven-acquisition
|
2f970a2815f90f591203c02c9099642e4cbd24d8
|
[
"CC0-1.0"
] | 20
|
2020-01-21T15:04:16.000Z
|
2021-08-05T16:18:06.000Z
|
tests/settings.py
|
adam-grandt-tts/data-driven-acquisition
|
2f970a2815f90f591203c02c9099642e4cbd24d8
|
[
"CC0-1.0"
] | 2
|
2020-05-10T18:29:54.000Z
|
2021-03-15T18:12:07.000Z
|
# -*- coding: utf-8
from __future__ import unicode_literals, absolute_import
import django
DEBUG = True
USE_TZ = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "2*e6n2i!thwjd*niwb-5)ru*ek_du@8x3e$-fs3(y7yx3xue$r"
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
ROOT_URLCONF = "tests.urls"
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"data_driven_acquisition",
]
SITE_ID = 1
if django.VERSION >= (1, 10):
MIDDLEWARE = ()
else:
MIDDLEWARE_CLASSES = ()
| 18.823529
| 66
| 0.664063
|
36a11e4e836aec38edbb0322e490fe5c90827287
| 3,861
|
py
|
Python
|
codes/django/curso-django/webempresa/webempresa/webempresa/settings.py
|
crisconru/snippetshell
|
edd97145029d0d1749845b7b1e90d89dcbfc4506
|
[
"MIT"
] | 1
|
2021-12-17T02:04:16.000Z
|
2021-12-17T02:04:16.000Z
|
codes/django/curso-django/webempresa/webempresa/webempresa/settings.py
|
crisconru/snippetshell
|
edd97145029d0d1749845b7b1e90d89dcbfc4506
|
[
"MIT"
] | 15
|
2019-05-27T19:23:50.000Z
|
2022-03-11T23:53:17.000Z
|
webempresa/webempresa/webempresa/settings.py
|
davichup/web-empresa
|
68791d220d8fd5a950587010ac6966668d12a6de
|
[
"Apache-2.0"
] | 1
|
2018-09-24T12:10:19.000Z
|
2018-09-24T12:10:19.000Z
|
"""
Django settings for webempresa project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%a22^g(n%ct(-5%s5vqc2g5u9f!3ajp0au5t#g=r^p2o!^(7s$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'ckeditor',
'contact',
'core',
'pages.apps.PagesConfig',
'services.apps.ServicesConfig',
'social.apps.SocialConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webempresa.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social.processors.ctx_dict'
],
},
},
]
WSGI_APPLICATION = 'webempresa.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'es'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# Media config
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Ckeditor
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'Custom',
'toolbar_Custom': [
['Bold', 'Italic', 'Underline'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock'],
['Link', 'Unlink']
]
}
}
# Email config
EMAIL_HOST = 'smtp.mailtrap.io'
EMAIL_HOST_USER = '73fe412c908dba'
EMAIL_HOST_PASSWORD = 'bb1e89e1349bac'
EMAIL_PORT = '2525'
| 25.74
| 140
| 0.676509
|
199ddc896738818934715fe5986bd506bbb491ef
| 9,729
|
py
|
Python
|
larcv/app/arxiv/arxiv/LArOpenCVHandle/ana/arxiv/track_shower.py
|
mmajewsk/larcv2
|
9ee74e42b293d547d3a8510fa2139b2d4ccf6b89
|
[
"MIT"
] | 14
|
2017-10-19T15:08:29.000Z
|
2021-03-31T21:21:07.000Z
|
larcv/app/arxiv/arxiv/LArOpenCVHandle/ana/arxiv/track_shower.py
|
mmajewsk/larcv2
|
9ee74e42b293d547d3a8510fa2139b2d4ccf6b89
|
[
"MIT"
] | 32
|
2017-10-25T22:54:06.000Z
|
2019-10-01T13:57:15.000Z
|
larcv/app/arxiv/arxiv/LArOpenCVHandle/ana/arxiv/track_shower.py
|
mmajewsk/larcv2
|
9ee74e42b293d547d3a8510fa2139b2d4ccf6b89
|
[
"MIT"
] | 16
|
2017-12-07T12:04:40.000Z
|
2021-11-15T00:53:31.000Z
|
import os, sys
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import root_numpy as rn
from larocv import larocv
rse = ['run','subrun','event']
rsev = ['run','subrun','event','vtxid']
rserv = ['run','subrun','event','roid','vtxid']
# Vertex data frame
dfs = {}
# Event data frame
edfs = {}
mdfs = {}
sample_name = str(sys.argv[1])
sample_file = str(sys.argv[2])
for name,file_ in [(sample_name,sample_file)]:
INPUT_FILE = file_
#
# Vertex wise Trees
#
vertex_df = pd.DataFrame(rn.root2array(INPUT_FILE,treename='VertexTree'))
angle_df = pd.DataFrame(rn.root2array(INPUT_FILE,treename='AngleAnalysis'))
shape_df = pd.DataFrame(rn.root2array(INPUT_FILE,treename='ShapeAnalysis'))
gap_df = pd.DataFrame(rn.root2array(INPUT_FILE,treename="GapAnalysis"))
match_df = pd.DataFrame(rn.root2array(INPUT_FILE,treename="MatchAnalysis"))
dqds_df = pd.DataFrame(rn.root2array(INPUT_FILE,treename="dQdSAnalysis"))
#
# Combine DataFrames
#
comb_df = pd.concat([vertex_df.set_index(rserv),
angle_df.set_index(rserv),
shape_df.set_index(rserv),
gap_df.set_index(rserv),
angle_df.set_index(rserv),
match_df.set_index(rserv),
dqds_df.set_index(rserv)],axis=1)
# print "v",vertex_df.set_index(rserv).index.size
# print "a",angle_df.set_index(rserv).index.size
# print "s",shape_df.set_index(rserv).index.size
# print "g",gap_df.set_index(rserv).index.size
# print "a",angle_df.set_index(rserv).index.size
# print "m",match_df.set_index(rserv).index.size
# print "d",dqds_df.set_index(rserv).index.size
# print
# print "c",comb_df.index.size
# print
#
# Store vertex wise data frame
#
comb_df = comb_df.reset_index()
dfs[name] = comb_df.copy()
#
# Event wise Trees
#
event_vertex_df = pd.DataFrame(rn.root2array(INPUT_FILE,treename="EventVertexTree"))
# mc_df = pd.DataFrame(rn.root2array(INPUT_FILE,treename="MCTree"))
edfs[name] = event_vertex_df.copy()
# mdfs[name] = mc_df.copy()
print "@ sample:",name,"good croi:",event_vertex_df.query("good_croi_ctr>0").index.size
print "total events: ", event_vertex_df.index.size
print
#
# Calculate the 3D opening angle, and 2D XZ projected opening angle
#
for name, comb_df in dfs.iteritems():
# print comb_df.par_trunk_pca_theta_estimate_v
comb_df['cosangle3d']=comb_df.apply(lambda x : larocv.CosOpeningAngle(x['par_trunk_pca_theta_estimate_v'][0],
x['par_trunk_pca_phi_estimate_v'][0],
x['par_trunk_pca_theta_estimate_v'][1],
x['par_trunk_pca_phi_estimate_v'][1]),axis=1)
# unused @ cut level at the moment
#
#comb_df['cosangleXZ']=comb_df.apply(lambda x : larocv.CosOpeningXZAngle(x['par_trunk_pca_theta_estimate_v'][0],
# x['par_trunk_pca_phi_estimate_v'][0],
# x['par_trunk_pca_theta_estimate_v'][1],
# x['par_trunk_pca_phi_estimate_v'][1]),axis=1)
#
# Look @ initial sample
#
d_good_vtx = float(5)
print "@ initial track/shower cut -- vertex (events)"
for name, comb_df in dfs.iteritems():
ts_query = "par1_type != par2_type"
ts_query_g = ts_query + " & scedr < @d_good_vtx"
print "Name....",name
print "Raw.....",comb_df.index.size,",",len(comb_df.groupby(rse))
print "All.....",comb_df.query(ts_query).index.size,",",len(comb_df.query(ts_query).groupby(rse))
print "Good....",comb_df.query(ts_query_g).index.size,",",len(comb_df.query(ts_query_g).groupby(rse))
print "Bad.....",comb_df.query(ts_query_g).index.size,"",len(comb_df.query(ts_query_g).groupby(rse))
print
#
# 1 track & 1 shower assumption from ssnet
#
def track_shower_assumption(df):
df['trkid'] = df.apply(lambda x : 0 if(x['par1_type']==1) else 1,axis=1)
df['shrid'] = df.apply(lambda x : 1 if(x['par2_type']==2) else 0,axis=1)
#
# Print out
#
def vtx_evts(df):
n_vertex_g = float(df.query("scedr < @d_good_vtx").index.size)
n_events_g = float(len(df.query("scedr < @d_good_vtx").groupby(rse)))
n_vertex_b = float(df.query("scedr > @d_good_vtx").index.size)
n_events_b = float(len(df.query("scedr > @d_good_vtx").groupby(rse)))
print "SV: %.03f SE: %.03f (%d,%d)"%(n_vertex_g/start_n_vertex_g,
n_events_g/start_n_events_g,
n_vertex_g,n_events_g)
print "BV: %.03f BE: %.03f (%d,%d)"%(1.0-n_vertex_b/start_n_vertex_b,
1.0-n_events_b/start_n_events_b,
n_vertex_b,n_events_b)
print
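# Note: vtx_evts() reads the start_n_* counters from module scope; they are
# assigned below (before the first call), so the SV/SE lines are survival
# fractions for good vertices/events and the BV/BE lines are rejection-style
# numbers computed against the start_n_*_b baselines (currently fixed at 1.0).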
sample = sample_name
ts_mdf = dfs[sample].copy()
print "<< Before >>"
print "Good Vertex (Events)",ts_mdf.query("scedr<@d_good_vtx").index.size,len(ts_mdf.query("scedr<@d_good_vtx").groupby(rse))
print "Bad Vertex (Events)",ts_mdf.query("scedr>@d_good_vtx").index.size,len(ts_mdf.query("scedr>@d_good_vtx").groupby(rse))
print
#
# For relative efficiency get the number of good cROI as input
#
start_n_vertex_g = float(ts_mdf.query("scedr<@d_good_vtx").index.size)
start_n_events_g = float(edfs[sample].query("good_croi_ctr>0").index.size)
# start_n_events_g = float(len(ts_mdf.query("scedr<5").groupby(rse)))
start_n_vertex_b = 1.0#float(ts_mdf.query("scedr>@d_good_vtx").index.size)
start_n_events_b = 1.0#float(len(ts_mdf.query("scedr>@d_good_vtx").groupby(rse)))
ts_mdf = ts_mdf.query("par1_type != par2_type")
track_shower_assumption(ts_mdf)
print "<< After assumption >>"
print "Good ",ts_mdf.query("scedr<@d_good_vtx").index.size,len(ts_mdf.query("scedr<@d_good_vtx").groupby(rse))
print "Bad ",ts_mdf.query("scedr>@d_good_vtx").index.size,len(ts_mdf.query("scedr>@d_good_vtx").groupby(rse))
print
ts_mdf['trk_trunk_pca_theta_estimate'] = ts_mdf.apply(lambda x : np.cos(x['par_trunk_pca_theta_estimate_v'][x['trkid']]),axis=1)
ts_mdf['shr_avg_length'] = ts_mdf.apply(lambda x : x['length_v'][x['shrid']] / x['nplanes_v'][x['shrid']],axis=1)
ts_mdf['trk_avg_length'] = ts_mdf.apply(lambda x : x['length_v'][x['trkid']] / x['nplanes_v'][x['trkid']],axis=1)
ts_mdf['shr_avg_width'] = ts_mdf.apply(lambda x : x['width_v'][x['shrid']] / x['nplanes_v'][x['shrid']],axis=1)
ts_mdf['trk_avg_width'] = ts_mdf.apply(lambda x : x['width_v'][x['trkid']] / x['nplanes_v'][x['trkid']],axis=1)
ts_mdf['shr_avg_perimeter'] = ts_mdf.apply(lambda x : x['perimeter_v'][x['shrid']] / x['nplanes_v'][x['shrid']],axis=1)
ts_mdf['trk_avg_perimeter'] = ts_mdf.apply(lambda x : x['perimeter_v'][x['trkid']] / x['nplanes_v'][x['trkid']],axis=1)
ts_mdf['shr_avg_area'] = ts_mdf.apply(lambda x : x['area_v'][x['shrid']] / x['nplanes_v'][x['shrid']],axis=1)
ts_mdf['trk_avg_area'] = ts_mdf.apply(lambda x : x['area_v'][x['trkid']] / x['nplanes_v'][x['trkid']],axis=1)
ts_mdf['shr_avg_npixel'] = ts_mdf.apply(lambda x : x['npixel_v'][x['shrid']] / x['nplanes_v'][x['shrid']],axis=1)
ts_mdf['trk_avg_npixel'] = ts_mdf.apply(lambda x : x['npixel_v'][x['trkid']] / x['nplanes_v'][x['trkid']],axis=1)
ts_df = ts_mdf.copy()
print '<< Track shower assumption >>'
vtx_evts(ts_df)
#
# Generic Cuts
#
query="npar==2"
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
query="in_fiducial==1"
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
query="pathexists2==1"
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
#
# Kinematic Cuts
#
query="cosangle3d >-0.995 and cosangle3d<0.995"
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
query="anglediff<176"
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
query='trk_trunk_pca_theta_estimate>-0.05'
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
#
# Shower Cuts
#
query="shr_avg_length>20"
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
query="shr_avg_width>5"
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
query="shr_avg_area>20"
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
query="shr_avg_npixel<600"
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
#
# Track Cuts
#
query="trk_avg_length<200"
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
query="trk_avg_perimeter<500"
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
query="trk_avg_npixel<500"
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
#
# dQds
#
query="dqds_ratio_01 < 0.55"
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
#
# Beta
#
query="dqds_diff_01 > 30"
ts_df = ts_df.query(query)
print "<< {} >>".format(query)
vtx_evts(ts_df)
# query="vertex_n_planes_charge==3"
# ts_df = ts_df.query(query)
# print "<< {} >>".format(query)
# vtx_evts(ts_df)
print "<< After >>"
print "Good Vertex (Events)",ts_df.query("scedr<@d_good_vtx").index.size,len(ts_df.query("scedr<@d_good_vtx").groupby(rse))
print "Bad Vertex (Events)",ts_df.query("scedr>@d_good_vtx").index.size,len(ts_df.query("scedr>@d_good_vtx").groupby(rse))
print
| 32.757576
| 129
| 0.633981
|
7743b7205ca0398d7515163ba68b218682105232
| 21,263
|
py
|
Python
|
quantum/openstack/common/rpc/impl_qpid.py
|
cuiwow/quantum
|
ce11b62046a0501e9fcd8442524d3c151d315dfb
|
[
"Apache-2.0"
] | 1
|
2019-04-11T10:27:47.000Z
|
2019-04-11T10:27:47.000Z
|
quantum/openstack/common/rpc/impl_qpid.py
|
cuiwow/quantum
|
ce11b62046a0501e9fcd8442524d3c151d315dfb
|
[
"Apache-2.0"
] | null | null | null |
quantum/openstack/common/rpc/impl_qpid.py
|
cuiwow/quantum
|
ce11b62046a0501e9fcd8442524d3c151d315dfb
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import time
import uuid
import eventlet
import greenlet
from quantum.openstack.common import cfg
from quantum.openstack.common.gettextutils import _
from quantum.openstack.common import importutils
from quantum.openstack.common import jsonutils
from quantum.openstack.common import log as logging
from quantum.openstack.common.rpc import amqp as rpc_amqp
from quantum.openstack.common.rpc import common as rpc_common
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
LOG = logging.getLogger(__name__)
qpid_opts = [
cfg.StrOpt('qpid_hostname',
default='localhost',
help='Qpid broker hostname'),
cfg.StrOpt('qpid_port',
default='5672',
help='Qpid broker port'),
cfg.ListOpt('qpid_hosts',
default=['$qpid_hostname:$qpid_port'],
help='Qpid HA cluster host:port pairs'),
cfg.StrOpt('qpid_username',
default='',
help='Username for qpid connection'),
cfg.StrOpt('qpid_password',
default='',
help='Password for qpid connection'),
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
cfg.IntOpt('qpid_heartbeat',
default=60,
help='Seconds between connection keepalive heartbeats'),
cfg.StrOpt('qpid_protocol',
default='tcp',
help="Transport to use, either 'tcp' or 'ssl'"),
cfg.BoolOpt('qpid_tcp_nodelay',
default=True,
help='Disable Nagle algorithm'),
]
cfg.CONF.register_opts(qpid_opts)
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, session, callback, node_name, node_opts,
link_name, link_opts):
"""Declare a queue on an amqp session.
'session' is the amqp session to use
'callback' is the callback to call when messages are received
'node_name' is the first part of the Qpid address string, before ';'
'node_opts' will be applied to the "x-declare" section of "node"
in the address string.
'link_name' goes into the "name" field of the "link" in the address
string
'link_opts' will be applied to the "x-declare" section of "link"
in the address string.
"""
self.callback = callback
self.receiver = None
self.session = None
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": True,
"auto-delete": True,
},
},
"link": {
"name": link_name,
"durable": True,
"x-declare": {
"durable": False,
"auto-delete": True,
"exclusive": False,
},
},
}
addr_opts["node"]["x-declare"].update(node_opts)
addr_opts["link"]["x-declare"].update(link_opts)
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.reconnect(session)
def reconnect(self, session):
"""Re-declare the receiver after a qpid reconnect"""
self.session = session
self.receiver = session.receiver(self.address)
self.receiver.capacity = 1
def consume(self):
"""Fetch the message and pass it to the callback object"""
message = self.receiver.fetch()
try:
msg = rpc_common.deserialize_msg(message.content)
self.callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
self.session.acknowledge(message)
def get_receiver(self):
return self.receiver
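    # For illustration only (topic and exchange names are made up): with
    # node_name="openstack/my_topic", node_opts={}, link_name="my_topic" and
    # link_opts={}, the constructor above produces an address string roughly like
    #
    #   openstack/my_topic ; {"create": "always",
    #       "node": {"type": "topic",
    #                "x-declare": {"durable": true, "auto-delete": true}},
    #       "link": {"name": "my_topic", "durable": true,
    #                "x-declare": {"durable": false, "auto-delete": true,
    #                              "exclusive": false}}}
    #
    # which Qpid parses to declare both the exchange ("node") and the consumer
    # queue ("link") when the receiver is created.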
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'"""
def __init__(self, conf, session, msg_id, callback):
"""Init a 'direct' queue.
'session' is the amqp session to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
"""
super(DirectConsumer, self).__init__(session, callback,
"%s/%s" % (msg_id, msg_id),
{"type": "direct"},
msg_id,
{"exclusive": True})
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'"""
def __init__(self, conf, session, topic, callback, name=None,
exchange_name=None):
"""Init a 'topic' queue.
:param session: the amqp session to use
:param topic: is the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param name: optional queue name, defaults to topic
"""
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
super(TopicConsumer, self).__init__(session, callback,
"%s/%s" % (exchange_name, topic),
{}, name or topic, {})
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'"""
def __init__(self, conf, session, topic, callback):
"""Init a 'fanout' queue.
'session' is the amqp session to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
"""
super(FanoutConsumer, self).__init__(
session, callback,
"%s_fanout" % topic,
{"durable": False, "type": "fanout"},
"%s_fanout_%s" % (topic, uuid.uuid4().hex),
{"exclusive": True})
class Publisher(object):
"""Base Publisher class"""
def __init__(self, session, node_name, node_opts=None):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.sender = None
self.session = session
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": False,
# auto-delete isn't implemented for exchanges in qpid,
# but put in here anyway
"auto-delete": True,
},
},
}
if node_opts:
addr_opts["node"]["x-declare"].update(node_opts)
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.reconnect(session)
def reconnect(self, session):
"""Re-establish the Sender after a reconnection"""
self.sender = session.sender(self.address)
def send(self, msg):
"""Send a message"""
self.sender.send(msg)
class DirectPublisher(Publisher):
"""Publisher class for 'direct'"""
def __init__(self, conf, session, msg_id):
"""Init a 'direct' publisher."""
super(DirectPublisher, self).__init__(session, msg_id,
{"type": "Direct"})
class TopicPublisher(Publisher):
"""Publisher class for 'topic'"""
def __init__(self, conf, session, topic):
"""init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
super(TopicPublisher, self).__init__(session,
"%s/%s" % (exchange_name, topic))
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'"""
def __init__(self, conf, session, topic):
"""init a 'fanout' publisher.
"""
super(FanoutPublisher, self).__init__(
session,
"%s_fanout" % topic, {"type": "fanout"})
class NotifyPublisher(Publisher):
"""Publisher class for notifications"""
def __init__(self, conf, session, topic):
"""init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
super(NotifyPublisher, self).__init__(session,
"%s/%s" % (exchange_name, topic),
{"durable": True})
class Connection(object):
"""Connection object."""
pool = None
def __init__(self, conf, server_params=None):
if not qpid_messaging:
raise ImportError("Failed to import qpid.messaging")
self.session = None
self.consumers = {}
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf
if server_params and 'hostname' in server_params:
# NOTE(russellb) This enables support for cast_to_server.
server_params['qpid_hosts'] = [
'%s:%d' % (server_params['hostname'],
server_params.get('port', 5672))
]
params = {
'qpid_hosts': self.conf.qpid_hosts,
'username': self.conf.qpid_username,
'password': self.conf.qpid_password,
}
params.update(server_params or {})
self.brokers = params['qpid_hosts']
self.username = params['username']
self.password = params['password']
self.connection_create(self.brokers[0])
self.reconnect()
def connection_create(self, broker):
# Create the connection - this does not open the connection
self.connection = qpid_messaging.Connection(broker)
# Check if flags are set and if so set them for the connection
# before we call open
self.connection.username = self.username
self.connection.password = self.password
self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
# Reconnection is done by self.reconnect()
self.connection.reconnect = False
self.connection.heartbeat = self.conf.qpid_heartbeat
self.connection.protocol = self.conf.qpid_protocol
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
def _register_consumer(self, consumer):
self.consumers[str(consumer.get_receiver())] = consumer
def _lookup_consumer(self, receiver):
return self.consumers[str(receiver)]
def reconnect(self):
"""Handles reconnecting and re-establishing sessions and queues"""
if self.connection.opened():
try:
self.connection.close()
except qpid_exceptions.ConnectionError:
pass
attempt = 0
delay = 1
while True:
broker = self.brokers[attempt % len(self.brokers)]
attempt += 1
try:
self.connection_create(broker)
self.connection.open()
except qpid_exceptions.ConnectionError, e:
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg)
time.sleep(delay)
delay = min(2 * delay, 60)
else:
LOG.info(_('Connected to AMQP server on %s'), broker)
break
self.session = self.connection.session()
if self.consumers:
consumers = self.consumers
self.consumers = {}
for consumer in consumers.itervalues():
consumer.reconnect(self.session)
self._register_consumer(consumer)
LOG.debug(_("Re-established AMQP queues"))
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (qpid_exceptions.Empty,
qpid_exceptions.ConnectionError), e:
if error_callback:
error_callback(e)
self.reconnect()
def close(self):
"""Close/release this connection"""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.connection.close()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again"""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.session.close()
self.session = self.connection.session()
self.consumers = {}
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.session, topic, callback)
self._register_consumer(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers"""
def _error_callback(exc):
if isinstance(exc, qpid_exceptions.Empty):
LOG.exception(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
def _consume():
nxt_receiver = self.session.next_receiver(timeout=timeout)
try:
self._lookup_consumer(nxt_receiver).consume()
except Exception:
LOG.exception(_("Error processing message. Skipping it."))
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread"""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()
def publisher_send(self, cls, topic, msg):
"""Send to a publisher based on the publisher class"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publisher_send():
publisher = cls(self.conf, self.session, topic)
publisher.send(msg)
return self.ensure(_connect_error, _publisher_send)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer"""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message"""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg):
"""Send a 'topic' message"""
self.publisher_send(TopicPublisher, topic, msg)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message"""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic"""
self.publisher_send(NotifyPublisher, topic, msg)
def consume(self, limit=None):
"""Consume from all queues/consumers"""
it = self.iterconsume(limit=limit)
while True:
try:
it.next()
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread"""
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object"""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
if fanout:
consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
else:
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)
self._register_consumer(consumer)
return consumer
def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object"""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
name=pool_name)
self._register_consumer(consumer)
return consumer
def create_connection(conf, new=True):
"""Create a connection"""
return rpc_amqp.create_connection(
conf, new,
rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection),
envelope)
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
| 34.857377
| 79
| 0.585618
|
0312c5d7f200db3a771a69eb1a38fad0b8c98158
| 2,058
|
py
|
Python
|
tests/python/game.py
|
Helen09/pxt
|
221233c420f34c74885d1b73f4b1b5074b6960aa
|
[
"MIT"
] | 1
|
2019-07-04T23:05:54.000Z
|
2019-07-04T23:05:54.000Z
|
tests/python/game.py
|
LaboratoryForPlayfulComputation/pxt
|
1d58e344faef7d6483cf5c1e40ae6ed73f7759bd
|
[
"MIT"
] | null | null | null |
tests/python/game.py
|
LaboratoryForPlayfulComputation/pxt
|
1d58e344faef7d6483cf5c1e40ae6ed73f7759bd
|
[
"MIT"
] | 2
|
2019-10-29T06:56:11.000Z
|
2021-05-25T10:18:12.000Z
|
class Foo:
def qux2(self):
z = 12
x = z * 3
self.baz = x
for q in range(10):
x += q
lst = ["foo", "bar", "baz"]
lst = lst[1:2]
assert len(lst) == 2, 201
def qux(self):
self.baz = self.bar
self.blah = "hello"
self._priv = 1
self._prot = self.baz
def _prot2(self):
pass
class Bar(Foo):
def something(self):
super()._prot2()
def something2(self):
self._prot = 12
class SpriteKind(Enum):
Player = 0
Projectile = 1
Enemy = 2
Food = 3
ii = img("""
. . . .
. a . .
. b b .
""")
hbuf = hex("a007")
hbuf2 = b'\xB0\x07'
asteroids = [sprites.space.space_small_asteroid1, sprites.space.space_small_asteroid0, sprites.space.space_asteroid0, sprites.space.space_asteroid1, sprites.space.space_asteroid4, sprites.space.space_asteroid3]
ship = sprites.create(sprites.space.space_red_ship, SpriteKind.Player)
ship.set_flag(SpriteFlag.STAY_IN_SCREEN, True)
ship.bottom = 120
controller.move_sprite(ship, 100, 100)
info.set_life(3)
def player_damage(sprite, other_sprite):
scene.camera_shake(4, 500)
other_sprite.destroy(effects.disintegrate)
sprite.start_effect(effects.fire, 200)
info.change_life_by(-1)
sprites.on_overlap(SpriteKind.Player, SpriteKind.Enemy, player_damage)
if False:
player_damage(ship, ship)
def enemy_damage(sprite:Sprite, other_sprite:Sprite):
sprite.destroy()
other_sprite.destroy(effects.disintegrate)
info.change_score_by(1)
sprites.on_overlap(SpriteKind.Projectile, SpriteKind.Enemy, enemy_damage)
def shoot():
projectile = sprites.create_projectile_from_sprite(sprites.food.small_apple, ship, 0, -140)
projectile.start_effect(effects.cool_radial, 100)
controller.A.on_event(ControllerButtonEvent.PRESSED, shoot)
def spawn_enemy():
projectile = sprites.create_projectile_from_side(asteroids[math.random_range(0, asteroids.length - 1)], 0, 75)
projectile.set_kind(SpriteKind.Enemy)
projectile.x = math.random_range(10, 150)
game.on_update_interval(500, spawn_enemy)
def qq():
pass
qq()
| 27.810811
| 210
| 0.706997
|
120618b43322669d3f179966004c4ef7cf7facbf
| 1,990
|
py
|
Python
|
src/Current Models/Misc/shell.py
|
PharaohCola13/Geotesimal
|
45de6fb9a587ae8eb3c85d0acd6b93c36fa7bf24
|
[
"MIT"
] | 3
|
2018-12-13T20:11:18.000Z
|
2022-01-13T13:51:19.000Z
|
src/Current Models/Misc/shell.py
|
PharaohCola13/geometric-models
|
45de6fb9a587ae8eb3c85d0acd6b93c36fa7bf24
|
[
"MIT"
] | 5
|
2018-10-19T18:18:05.000Z
|
2021-06-10T00:20:52.000Z
|
src/Current Models/Misc/shell.py
|
PharaohCola13/geometric-models
|
45de6fb9a587ae8eb3c85d0acd6b93c36fa7bf24
|
[
"MIT"
] | 1
|
2018-10-17T05:32:26.000Z
|
2018-10-17T05:32:26.000Z
|
# A Shell, brought to you by PharaohCola13
import time  # needed for the time.sleep() call below
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.pyplot as plt
from matplotlib import *
from numpy import *
from mpl_toolkits.mplot3d.art3d import *
from matplotlib.animation import *
name = "Shell"
def shape(fig, alpha, color, edge_c, edge_w, grid, sides, edges, figcolor, rotation, rotmagt, rotmagp, save):
plt.clf()
# Definition of x
def x_(u,v):
x = power(1.2, v) * (sin(u)**2 * sin(v))
return x
# Definition of y
def y_(u,v):
y = power(1.2, v) * (sin(u)**2 * cos(v))
return y
# Definition of z
def z_(u,v):
z = power(1.2, v) * (sin(u) * cos(u))
return z
#Value of the angles
s = sides
u = linspace(0, pi, s + 1)
v = linspace(-pi/4, 5 * pi/2, edges)
u, v = meshgrid(u, v)
# Symbolic representation
x = x_(u,v)
y = y_(u,v)
z = z_(u,v)
# Figure Properties
ax = p3.Axes3D(fig)
ax.set_facecolor(figcolor) # Figure background turns black
# Axis Properties
plt.axis(grid) # Turns off the axis grid
plt.axis('equal')
# Axis Limits
#ax.set_xlim(-1,1)
#ax.set_ylim(-1,1)
#ax.set_zlim(-1,1)
# Surface Plot
shell = ax.plot_surface(x, y, z)
shell.set_alpha(alpha) # Transparency of figure
shell.set_edgecolor(edge_c) # Edge color of the lines on the figure
shell.set_linewidth(edge_w) # Line width of the edges
shell.set_facecolor(color) # General color of the figure
def rot_on():
def animate(i):
ax.view_init(azim=rotmagt * i, elev=rotmagp * i)
if save == "MP4":
# Animate
ani = FuncAnimation(fig, animate, frames=500,
interval=100, save_count=50) # frames=100)#, repeat=True)
Writer = writers['ffmpeg']
writer = Writer(fps=30, bitrate=1800)
ani.save('{}.mp4'.format(name), writer=writer)
else:
#save = None
# Animate
ani = FuncAnimation(fig, animate,
interval=1, save_count=50) # frames=100)#, repeat=True)
pass
plt.ion()
plt.show()
time.sleep(0)
plt.close()
if rotation == "On":
rot_on()
elif rotation == "Off":
pass
| 21.630435
| 109
| 0.657286