hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d45bb94d2afc57e4e809762826d69fe0fa85e890 | 2,174 | py | Python | build/android/gyp/util/md5_check.py | nagineni/chromium-crosswalk | 5725642f1c67d0f97e8613ec1c3e8107ab53fdf8 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 231 | 2015-01-08T09:04:44.000Z | 2021-12-30T03:03:10.000Z | build/android/gyp/util/md5_check.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 8 | 2015-08-31T06:39:59.000Z | 2021-12-04T14:53:28.000Z | build/android/gyp/util/md5_check.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 268 | 2015-01-21T05:53:28.000Z | 2022-03-25T22:09:01.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import hashlib
import os
def CallAndRecordIfStale(
    function, record_path=None, input_paths=None, input_strings=None,
    force=False):
  """Calls function if the md5sum of the input paths/strings has changed.

  The md5sum of the inputs is compared with the one stored in record_path. If
  this has changed (or the record doesn't exist), function will be called and
  the new md5sum will be recorded.

  If force is True, the function will be called regardless of whether the
  md5sum is out of date.

  Args:
    function: Zero-argument callable invoked when the inputs are stale.
    record_path: Path of the '.stamp' file holding the previous md5sum.
    input_paths: Files/directories whose contents feed into the md5sum.
    input_strings: Extra strings mixed into the md5sum (e.g. command lines).
    force: If True, always call function (the record is still refreshed).
  """
  # None defaults instead of mutable [] defaults: a shared module-level list
  # would leak state across calls if any callee ever mutated it.
  if input_paths is None:
    input_paths = []
  if input_strings is None:
    input_strings = []
  md5_checker = _Md5Checker(
      record_path=record_path,
      input_paths=input_paths,
      input_strings=input_strings)
  if force or md5_checker.IsStale():
    function()
    md5_checker.Write()
def _UpdateMd5ForFile(md5, path, block_size=2**16):
with open(path, 'rb') as infile:
while True:
data = infile.read(block_size)
if not data:
break
md5.update(data)
def _UpdateMd5ForDirectory(md5, dir_path):
  """Feeds the contents of every file under |dir_path| into |md5|.

  Directories and files are visited in sorted order so the resulting digest
  is deterministic; os.walk's native ordering is filesystem-dependent, which
  would make stamp files spuriously stale across machines.
  """
  for root, dirs, files in os.walk(dir_path):
    dirs.sort()  # Sorting dirs in place fixes os.walk's traversal order.
    for f in sorted(files):
      _UpdateMd5ForFile(md5, os.path.join(root, f))
def _UpdateMd5ForPath(md5, path):
  """Feeds |path| into |md5|, recursing if it names a directory."""
  updater = _UpdateMd5ForDirectory if os.path.isdir(path) else _UpdateMd5ForFile
  updater(md5, path)
class _Md5Checker(object):
def __init__(self, record_path=None, input_paths=[], input_strings=[]):
assert record_path.endswith('.stamp'), (
'record paths must end in \'.stamp\' so that they are easy to find '
'and delete')
self.record_path = record_path
md5 = hashlib.md5()
for i in sorted(input_paths):
_UpdateMd5ForPath(md5, i)
for s in input_strings:
md5.update(s)
self.new_digest = md5.hexdigest()
self.old_digest = ''
if os.path.exists(self.record_path):
with open(self.record_path, 'r') as old_record:
self.old_digest = old_record.read()
def IsStale(self):
return self.old_digest != self.new_digest
def Write(self):
with open(self.record_path, 'w') as new_record:
new_record.write(self.new_digest)
| 28.233766 | 79 | 0.695952 |
a968f0bbe5b3eaae4c460419c3751139ca4fa152 | 93 | py | Python | exercices/utils.py | zazbone/PDE | 65a79b2839b54b83692e443bf45716dc6a7737bf | [
"MIT"
] | null | null | null | exercices/utils.py | zazbone/PDE | 65a79b2839b54b83692e443bf45716dc6a7737bf | [
"MIT"
] | null | null | null | exercices/utils.py | zazbone/PDE | 65a79b2839b54b83692e443bf45716dc6a7737bf | [
"MIT"
] | null | null | null | from pathlib import Path
# Directory containing this module; used as the base for resolving resources.
EXP_PATH = Path(__file__).parent
IMAGE_FOLDER = EXP_PATH / "image" | 18.6 | 33 | 0.774194 |
c3b1018552c6d2c741622a24b3629ab6075b450e | 649 | py | Python | ps3_python/svd_proj_matrix.py | andrew-kulikov/intro-to-cv-ud810 | 031c0d36a2f6a5b33264e61bf648ec0bd80c8b49 | [
"MIT"
] | 1 | 2019-01-14T16:59:48.000Z | 2019-01-14T16:59:48.000Z | ps3_python/svd_proj_matrix.py | andrew-kulikov/intro-to-cv-ud810 | 031c0d36a2f6a5b33264e61bf648ec0bd80c8b49 | [
"MIT"
] | null | null | null | ps3_python/svd_proj_matrix.py | andrew-kulikov/intro-to-cv-ud810 | 031c0d36a2f6a5b33264e61bf648ec0bd80c8b49 | [
"MIT"
] | null | null | null | import numpy as np
def svd_proj_matrix(points_3d, points_2d):
    """Estimates the 3x4 camera projection matrix via DLT.

    Builds the standard 2n x 12 homogeneous linear system from n 3D<->2D
    point correspondences and takes the right singular vector associated
    with the smallest singular value as the flattened projection matrix.

    Args:
        points_3d: (n, 3) array of world coordinates.
        points_2d: (n, 2) array of corresponding image coordinates.

    Returns:
        M: (3, 4) projection matrix (defined up to scale).
    """
    num_pts = points_3d.shape[0]
    A = np.zeros((2 * num_pts, 12), dtype=np.float32)

    u = points_2d[:, 0]
    v = points_2d[:, 1]
    wx = points_3d[:, 0]
    wy = points_3d[:, 1]
    wz = points_3d[:, 2]
    zero_col = np.zeros(num_pts, dtype=np.float32)
    one_col = zero_col + 1

    # Even rows constrain the x image coordinate, odd rows the y coordinate.
    A[0::2, :] = np.column_stack(
        (wx, wy, wz, one_col, zero_col, zero_col, zero_col, zero_col,
         -u * wx, -u * wy, -u * wz, -u))
    A[1::2, :] = np.column_stack(
        (zero_col, zero_col, zero_col, zero_col, wx, wy, wz, one_col,
         -v * wx, -v * wy, -v * wz, -v))

    # The solution is the null-space direction of A: the right singular
    # vector for the smallest singular value (last row of V^T).
    _, _, vt = np.linalg.svd(A)
    return vt.T[:, -1].reshape((3, 4))
| 30.904762 | 106 | 0.520801 |
e30336979e0fe51889ca976aa2b5b0e418065c9f | 440 | py | Python | pythontutor-ru/01_inout_and_arithmetic_operations/04_electronic_watch.py | ornichola/learning-new | e567218d8887805e38b1361715d5e3bd51a6bcaf | [
"Unlicense"
] | 2 | 2019-05-24T20:10:16.000Z | 2020-07-11T06:06:43.000Z | pythontutor-ru/01_inout_and_arithmetic_operations/04_electronic_watch.py | ornichola/learning-new | e567218d8887805e38b1361715d5e3bd51a6bcaf | [
"Unlicense"
] | null | null | null | pythontutor-ru/01_inout_and_arithmetic_operations/04_electronic_watch.py | ornichola/learning-new | e567218d8887805e38b1361715d5e3bd51a6bcaf | [
"Unlicense"
] | 21 | 2019-03-11T20:25:05.000Z | 2022-02-28T13:53:10.000Z | '''
http://pythontutor.ru/lessons/inout_and_arithmetic_operations/problems/electronic_watch/
Дано число n. С начала суток прошло n минут. Определите, сколько часов и минут будут показывать электронные часы в этот момент.
Программа должна вывести два числа: количество часов (от 0 до 23) и количество минут (от 0 до 59).
Учтите, что число n может быть больше, чем количество минут в сутках.
'''
# Number of minutes elapsed since the start of the day (may exceed 24h).
n = int(input())
# Hours wrap modulo 24; minutes are the remainder within the current hour.
print(n // 60 % 24, n % 60)
| 44 | 127 | 0.759091 |
fdb64115fa95113b0386f1d6440d35b4a4e111ae | 2,553 | py | Python | influence-release-mod/influence/awa_mlp.py | chihkuanyeh/Representer_Point_Selection | c559b32a768f54352e6efe6c246c70e1361b7c2e | [
"MIT"
] | 63 | 2019-02-26T20:15:58.000Z | 2022-03-24T15:59:02.000Z | influence-release-mod/influence/awa_mlp.py | chihkuanyeh/Representer_Point_Selection | c559b32a768f54352e6efe6c246c70e1361b7c2e | [
"MIT"
] | 4 | 2019-04-25T18:30:58.000Z | 2021-09-09T22:05:42.000Z | influence-release-mod/influence/awa_mlp.py | chihkuanyeh/Representer_Point_Selection | c559b32a768f54352e6efe6c246c70e1361b7c2e | [
"MIT"
] | 17 | 2019-04-15T06:39:32.000Z | 2021-05-20T03:25:30.000Z | """
Model for AWA experiments using Resnet features
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import sys
import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing, cluster
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
import os.path
import time
import IPython
import tensorflow as tf
import math
from influence.genericNeuralNet import GenericNeuralNet, variable, variable_with_weight_decay
from influence.dataset import DataSet
class AWA_MLP(GenericNeuralNet):
    ## The last layer of net
    """Linear classifier over AWA Resnet features: a single fully-connected
    layer mapping 2048-d inputs to logits for 50 classes."""

    def __init__(self, input_dim, **kwargs):
        # Dimensionality of the input feature vectors (2048 for Resnet).
        self.input_dim = input_dim
        super(AWA_MLP, self).__init__(**kwargs)

    def get_all_params(self):
        """Returns the weight and bias tensors of the 'fc' layer."""
        all_params = []
        for layer in ['fc']:
            for var_name in ['weights', 'biases']:
                temp_tensor = tf.get_default_graph().get_tensor_by_name("%s/%s:0" % (layer, var_name))
                all_params.append(temp_tensor)
        return all_params

    def retrain(self, num_steps, feed_dict):
        """Runs num_steps optimizer updates on the examples in feed_dict."""
        retrain_dataset = DataSet(feed_dict[self.input_placeholder], feed_dict[self.labels_placeholder])
        # range instead of the Python-2-only xrange keeps this runnable on
        # Python 3 (identical iteration behavior on Python 2).
        for step in range(num_steps):
            iter_feed_dict = self.fill_feed_dict_with_batch(retrain_dataset)
            self.sess.run(self.train_op, feed_dict=iter_feed_dict)

    def placeholder_inputs(self):
        """Creates float feature and int label placeholders (batch-size None)."""
        input_placeholder = tf.placeholder(
            tf.float32,
            shape=(None, self.input_dim),
            name='input_placeholder')
        labels_placeholder = tf.placeholder(
            tf.int32,
            shape=(None),
            name='labels_placeholder')
        return input_placeholder, labels_placeholder

    def inference(self, input_x):
        """Computes class logits: input_x @ W + b, with W stored flattened."""
        with tf.variable_scope('fc'):
            # Weights are created flat ([2048*50]) and reshaped at use time.
            weights3 = variable(
                'weights',
                [2048 * 50],
                tf.contrib.layers.xavier_initializer())
            biases3 = variable(
                'biases',
                [50],
                tf.constant_initializer(0.0))
            logits = tf.matmul(input_x, tf.reshape(weights3, [2048, 50])) + biases3
        return logits

    def predictions(self, logits):
        """Converts logits to class probabilities via softmax."""
        preds = tf.nn.softmax(logits, name='preds')
        return preds
| 32.316456 | 114 | 0.637681 |
d4a98305ccd54f5946414a461d26acc43a58773e | 533 | py | Python | model_learn/people/migrations/0001_initial.py | knowMandM/django | 2aaf8bb2b4eabc3389427f5bbe17cd9b69829ca1 | [
"MIT"
] | 1 | 2019-04-12T12:57:38.000Z | 2019-04-12T12:57:38.000Z | model_learn/people/migrations/0001_initial.py | knowMandM/django | 2aaf8bb2b4eabc3389427f5bbe17cd9b69829ca1 | [
"MIT"
] | null | null | null | model_learn/people/migrations/0001_initial.py | knowMandM/django | 2aaf8bb2b4eabc3389427f5bbe17cd9b69829ca1 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.7 on 2019-03-09 00:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Person table with an
    # auto-increment primary key plus name and age columns.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('age', models.IntegerField()),
            ],
        ),
    ]
| 23.173913 | 114 | 0.560976 |
acf5f20833f54e8a644ae9cb61890aa58fd7668d | 124 | py | Python | exercicio24.py | monabrisa/-infosatc-lp-avaliativo-01 | 39d8b97162fa0102db1316b977e960bc07cd7299 | [
"MIT"
] | null | null | null | exercicio24.py | monabrisa/-infosatc-lp-avaliativo-01 | 39d8b97162fa0102db1316b977e960bc07cd7299 | [
"MIT"
] | null | null | null | exercicio24.py | monabrisa/-infosatc-lp-avaliativo-01 | 39d8b97162fa0102db1316b977e960bc07cd7299 | [
"MIT"
] | null | null | null | metros = float(input("Digite um valor em m²: "))
# 1 m² ≈ 0.000247105 acres; the factor below is that constant rounded.
acres = metros * 0.000247
print("{} m² são {} acres".format(metros, acres)) | 41.333333 | 49 | 0.669355 |
31423f49402a394ebc5f9b4029186c0425e10d21 | 1,673 | py | Python | project/api/views/search.py | fael07/Blog-Django-with-CBV | 269747b2e663a34b99acae6368db49c6ad37c2b8 | [
"MIT"
] | null | null | null | project/api/views/search.py | fael07/Blog-Django-with-CBV | 269747b2e663a34b99acae6368db49c6ad37c2b8 | [
"MIT"
] | null | null | null | project/api/views/search.py | fael07/Blog-Django-with-CBV | 269747b2e663a34b99acae6368db49c6ad37c2b8 | [
"MIT"
] | null | null | null | from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from django.core.cache import cache
from Support.Code.actions.Support.utils.main import gets
from django.views.decorators.cache import cache_page
from django.utils.decorators import method_decorator
class SearchApiView(APIView):
    """Search endpoint over the cached posts/categories/subcategories/authors.

    GET returns the raw cached search dataset; POST performs a (per-query
    cached) case-insensitive title search across all content types.
    """

    @method_decorator(cache_page(60 * 5))
    def get(self, request):
        """Returns the full cached search dataset (response cached 5 min)."""
        search_api_data: dict = cache.get('search_api')
        return Response(search_api_data)

    def post(self, request):
        """Searches all content types for titles containing the query."""
        if 'search' not in request.data:
            return Response({'error': 'invalid body'}, status=status.HTTP_400_BAD_REQUEST)

        search: str = request.data['search']
        # Fall back to an empty dict when the per-query cache is cold:
        # cache.get returns None on a miss, which would break `in` / `[]`.
        searches: dict = cache.get('searches') or {}
        if search.lower() in searches:
            # Index with the lowercased key — results are stored lowercased
            # below, so indexing with the raw query would raise KeyError for
            # any query differing only in case.
            return Response(searches[search.lower()])

        search_api_data: dict = cache.get('search_api')
        posts, categories, authors, subcategories = gets(search_api_data, 'posts', 'categories', 'authors', 'subcategories', obj_filter='none')

        do_search = lambda item: search.lower() in item['title'].lower()
        response = {
            'posts': list(filter(do_search, posts[:])),
            'categories': list(filter(do_search, categories[:])),
            'subcategories': list(filter(do_search, subcategories[:])),
            'authors': list(filter(do_search, authors[:])),
        }
        # Persist the per-query result cache (no expiry).
        cache.set('searches', {**searches, search.lower(): response}, None)
        return Response(response)
| 35.595745 | 143 | 0.621638 |
b97ef3f3c9f5677b8322ccf137c7aefb24e25ca4 | 1,541 | py | Python | products/src/table_update/main.py | DeepHiveMind/aws-serverless-ecommerce-platform | 38429459293e4b07fcaf9ed823f4f009abaccf71 | [
"MIT-0"
] | 1 | 2020-07-18T08:35:45.000Z | 2020-07-18T08:35:45.000Z | products/src/table_update/main.py | DeepHiveMind/aws-serverless-ecommerce-platform | 38429459293e4b07fcaf9ed823f4f009abaccf71 | [
"MIT-0"
] | null | null | null | products/src/table_update/main.py | DeepHiveMind/aws-serverless-ecommerce-platform | 38429459293e4b07fcaf9ed823f4f009abaccf71 | [
"MIT-0"
] | null | null | null | """
TableUpdateFunction
"""
import os
from typing import List
import boto3
from boto3.dynamodb.types import TypeDeserializer
from aws_lambda_powertools.tracing import Tracer
from aws_lambda_powertools.logging.logger import Logger
from ecom.eventbridge import ddb_to_event # pylint: disable=import-error
ENVIRONMENT = os.environ["ENVIRONMENT"]
EVENT_BUS_NAME = os.environ["EVENT_BUS_NAME"]
eventbridge = boto3.client("events") # pylint: disable=invalid-name
type_deserializer = TypeDeserializer() # pylint: disable=invalid-name
logger = Logger() # pylint: disable=invalid-name
tracer = Tracer() # pylint: disable=invalid-name
@tracer.capture_method
def send_events(events: List[dict]):
    """
    Send events to EventBridge
    """
    logger.info("Sending %d events to EventBridge", len(events))
    # NOTE(review): EventBridge PutEvents caps the number of entries per call
    # (10 at the time of writing) — confirm upstream record batches stay under
    # that limit or add chunking here.
    eventbridge.put_events(Entries=events)
@logger.inject_lambda_context
@tracer.capture_lambda_handler
def handler(event, _):
    """
    Lambda function handler for Products Table stream

    Transforms each DynamoDB stream record into an EventBridge event and
    forwards the batch to the event bus.
    """
    logger.debug({"message": "Input event", "event": event})
    records = event.get("Records", [])
    logger.debug({"message": "Records received", "records": records})
    bridge_events = []
    for record in records:
        bridge_events.append(
            ddb_to_event(record, EVENT_BUS_NAME, "ecommerce.products", "Product", "productId"))
    logger.info("Received %d event(s)", len(bridge_events))
    logger.debug({"message": "Events processed from records", "events": bridge_events})
    send_events(bridge_events)
| 24.078125 | 90 | 0.693056 |
94881f4f8584cf14e07a7aa2ffa30b31f6e7d316 | 5,497 | py | Python | pyserini/collection/_base.py | printfCalvin/pyserini | fc95f594721d511e1d6e763e8bd58476d759a63d | [
"Apache-2.0"
] | 1 | 2022-02-21T05:14:06.000Z | 2022-02-21T05:14:06.000Z | pyserini/collection/_base.py | printfCalvin/pyserini | fc95f594721d511e1d6e763e8bd58476d759a63d | [
"Apache-2.0"
] | null | null | null | pyserini/collection/_base.py | printfCalvin/pyserini | fc95f594721d511e1d6e763e8bd58476d759a63d | [
"Apache-2.0"
] | null | null | null | #
# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
from enum import Enum
from ..multithreading import Counters
from ..pyclass import autoclass, cast, JPaths
logger = logging.getLogger(__name__)
JFileSegment = autoclass('io.anserini.collection.FileSegment')
JSourceDocument = autoclass('io.anserini.collection.SourceDocument')
class JCollections(Enum):
    """Maps collection-class names to their Anserini (Java) implementations.

    Each member's value is the autoclass-wrapped Java class; look a class up
    by name with ``JCollections[name]`` and instantiate it via ``.value(path)``.
    """
    AclAnthology = autoclass('io.anserini.collection.AclAnthology')
    CarCollection = autoclass('io.anserini.collection.CarCollection')
    Cord19AbstractCollection = autoclass('io.anserini.collection.Cord19AbstractCollection')
    ClueWeb09Collection = autoclass('io.anserini.collection.ClueWeb09Collection')
    ClueWeb12Collection = autoclass('io.anserini.collection.ClueWeb12Collection')
    HtmlCollection = autoclass('io.anserini.collection.HtmlCollection')
    JsonCollection = autoclass('io.anserini.collection.JsonCollection')
    NewYorkTimesCollection = autoclass('io.anserini.collection.NewYorkTimesCollection')
    TrecCollection = autoclass('io.anserini.collection.TrecCollection')
    TrecwebCollection = autoclass('io.anserini.collection.TrecwebCollection')
    TweetCollection = autoclass('io.anserini.collection.TweetCollection')
    WashingtonPostCollection = autoclass('io.anserini.collection.WashingtonPostCollection')
    WikipediaCollection = autoclass('io.anserini.collection.WikipediaCollection')
class Collection:
    """
    Iterable wrapper class for Anserini's DocumentCollection.

    Parameters
    ----------
    collection_class : str
        Name of collection class to instantiate
    collection_path : str
        Path to directory containing collection
    """

    def __init__(self, collection_class, collection_path):
        self.counters = Counters()
        self.collection_class = collection_class
        self.collection_path = JPaths.get(collection_path)
        self.object = self._get_collection()
        self.collection_iterator = self.object.iterator()

    def _get_collection(self):
        """Instantiates the named Anserini collection over collection_path."""
        try:
            return JCollections[self.collection_class].value(self.collection_path)
        except Exception as exception:
            # Narrowed from a bare ``except:`` (which would also swallow
            # KeyboardInterrupt/SystemExit) and chained so the real cause —
            # unknown class name or Java-side construction failure — stays
            # visible in the traceback.
            raise ValueError(self.collection_class) from exception

    def __iter__(self):
        return self

    def __next__(self):
        # Each iteration step yields one wrapped file segment.
        if self.collection_iterator.hasNext():
            fs = self.collection_iterator.next()
            return FileSegment(self, fs, fs.getSegmentPath())
        else:
            raise StopIteration
class FileSegment:
    """
    Iterable wrapper class for Anserini's FileSegment.

    Parameters
    ----------
    collection : Collection
        Parent collection of the file segment
    segment : JFileSegment
        FileSegment object to create wrapper from
    segment_path : str
        Path to file backing the file segment
    """

    def __init__(self, collection, segment, segment_path):
        self.collection = collection
        try:
            # Downcast to the collection-specific Segment inner class so any
            # subclass behavior is available.
            self.object = cast(collection.object.getClass().getName() +
                               '$Segment', segment)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # still propagate; on any other failure, log and fall back to the
            # generic FileSegment type.
            logger.exception('Exception from casting FileSegment type...')
            self.object = cast('io.anserini.collection.FileSegment', segment)
        self.segment_iterator = self.object.iterator()
        self.segment_path = segment_path
        # Flatten path separators so the segment name is filesystem-neutral.
        self.segment_name = re.sub(r'\\|\/', '-', collection.collection_path.relativize(segment_path).toString())

    def __iter__(self):
        return self

    def __next__(self):
        if self.object.iterator().hasNext():
            d = self.object.iterator().next()
            return SourceDocument(self, d)
        else:
            # log if iteration stopped by error
            if self.object.getErrorStatus():
                logger.error(self.segment_name + ': Error from segment iteration, stopping...')
                self.collection.counters.errors.increment()
            # stop iteration and log skipped documents
            skipped = self.object.getSkippedCount()
            if skipped > 0:
                self.collection.counters.skips.increment(skipped)
                logger.warning(self.segment_name + ': ' + str(skipped) + ' documents skipped')
            self.object.close()
            raise StopIteration
class SourceDocument:
    """
    Wrapper class for Anserini's SourceDocument.

    Parameters
    ----------
    segment : FileSegment
        Parent segment of the source document
    document : io.anserini.collection.SourceDocument
        SourceDocument object to create wrapper from
    """

    def __init__(self, segment, document):
        if not isinstance(document, JSourceDocument):
            raise TypeError('Invalid JSourceDocument!')
        self.segment = segment
        self.object = document
        # Pull the commonly used fields across the Java bridge once, up
        # front, so later attribute access is plain Python.
        self.id = self.object.id()
        self.indexable = self.object.indexable()
        self.contents = self.object.contents()
        self.raw = self.object.raw()
| 35.694805 | 113 | 0.68692 |
cf8a1451917b71e663912de8e99fe3b5782e4596 | 1,866 | py | Python | dataflow/model/notebooks/Master_pipeline_runner.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 5 | 2021-08-10T23:16:44.000Z | 2022-03-17T17:27:00.000Z | dataflow/model/notebooks/Master_pipeline_runner.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 330 | 2021-06-10T17:28:22.000Z | 2022-03-31T00:55:48.000Z | dataflow/model/notebooks/Master_pipeline_runner.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 6 | 2021-06-10T17:20:32.000Z | 2022-03-28T08:08:03.000Z | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
# %load_ext autoreload
# %autoreload 2
import logging
import os
import core.config as cconfig
import dataflow as cdataf
import helpers.hdbg as hdbg
import helpers.henv as henv
import helpers.hpickle as hpickle
import helpers.hprint as hprint
# %%
# Configure logging / notebook display and report the environment signature.
hdbg.init_logger(verbosity=logging.INFO)
_LOG = logging.getLogger(__name__)
_LOG.info("%s", henv.get_system_signature()[0])
hprint.config_notebook()
# %%
# The pipeline config is injected via the environment by the experiment runner.
config = cconfig.get_config_from_env()
# %%
# Separate the DAG definition from the remaining meta-parameters.
dag_config = config.pop("DAG")
# %%
dag_runner = cdataf.PredictionDagRunner(dag_config, config["meta"]["dag_builder"])
# %%
# Visualize the DAG topology.
cdataf.draw(dag_runner.dag)
# %%
# Optionally restrict fit/predict time intervals from the config.
if "set_fit_intervals" in config["meta"].to_dict():
    dag_runner.set_fit_intervals(
        **config["meta", "set_fit_intervals", "func_kwargs"].to_dict()
    )
if "set_predict_intervals" in config["meta"].to_dict():
    dag_runner.set_predict_intervals(
        **config["meta", "set_predict_intervals", "func_kwargs"].to_dict()
    )
# %%
fit_result_bundle = dag_runner.fit()
# %%
# Attach the originating config to the results for reproducibility.
payload = cconfig.get_config_from_nested_dict({"config": config})
# %%
# NOTE(review): the `and config["meta"]` clause only checks truthiness of the
# whole meta section, not the "run_oos" value — confirm this is intentional.
if "run_oos" in config["meta"].to_dict().keys() and config["meta"]:
    result_bundle = dag_runner.predict()
    payload["fit_result_bundle"] = fit_result_bundle.to_config()
else:
    result_bundle = fit_result_bundle
# %%
result_bundle.payload = payload
# %%
# Persist the result bundle; serialization failures surface as AssertionError
# from the helpers and are downgraded to a warning.
try:
    path = os.path.join(
        config["meta", "experiment_result_dir"], "result_bundle.pkl"
    )
    if True:  # NOTE(review): leftover debug guard — always true.
        hpickle.to_pickle(result_bundle.to_config().to_dict(), path)
except AssertionError:
    _LOG.warning("Unable to serialize results.")
| 21.952941 | 82 | 0.690247 |
79d8324d65d001b331de92c7722aa1fdb47fdfb7 | 339 | py | Python | mol_shrink_ray/__init__.py | jeff231li/mol_shrink_ray | 8eaf1182095052e99b0aa779aac5574b1fa3adba | [
"MIT"
] | null | null | null | mol_shrink_ray/__init__.py | jeff231li/mol_shrink_ray | 8eaf1182095052e99b0aa779aac5574b1fa3adba | [
"MIT"
] | 1 | 2021-09-17T18:19:01.000Z | 2021-09-17T18:19:01.000Z | mol_shrink_ray/__init__.py | jeff231li/molecular_shrink_ray | 8eaf1182095052e99b0aa779aac5574b1fa3adba | [
"MIT"
] | null | null | null | """
I Shrunk me Molecule!
A module to shrink a molecule/ligand during MD simulations in OpenMM
"""
# Add imports here
from .mol_shrink_ray import *
# Handle versioneer
from ._version import get_versions
versions = get_versions()
__version__ = versions['version']
__git_revision__ = versions['full-revisionid']
del get_versions, versions
| 22.6 | 68 | 0.781711 |
89b7a30e037fe451dd9627c679124548a2028ee0 | 3,005 | py | Python | azure-mgmt-redis/azure/mgmt/redis/models/redis_update_parameters_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-redis/azure/mgmt/redis/models/redis_update_parameters_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-redis/azure/mgmt/redis/models/redis_update_parameters_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RedisUpdateParameters(Model):
    """Parameters supplied to the Update Redis operation.

    :param redis_configuration: All Redis Settings. Few possible keys:
     rdb-backup-enabled,rdb-storage-connection-string,rdb-backup-frequency,maxmemory-delta,maxmemory-policy,notify-keyspace-events,maxmemory-samples,slowlog-log-slower-than,slowlog-max-len,list-max-ziplist-entries,list-max-ziplist-value,hash-max-ziplist-entries,hash-max-ziplist-value,set-max-intset-entries,zset-max-ziplist-entries,zset-max-ziplist-value
     etc.
    :type redis_configuration: dict[str, str]
    :param enable_non_ssl_port: Specifies whether the non-ssl Redis server
     port (6379) is enabled.
    :type enable_non_ssl_port: bool
    :param tenant_settings: A dictionary of tenant settings
    :type tenant_settings: dict[str, str]
    :param shard_count: The number of shards to be created on a Premium
     Cluster Cache.
    :type shard_count: int
    :param minimum_tls_version: Optional: requires clients to use a specified
     TLS version (or higher) to connect (e,g, '1.0', '1.1', '1.2'). Possible
     values include: '1.0', '1.1', '1.2'
    :type minimum_tls_version: str or ~azure.mgmt.redis.models.TlsVersion
    :param sku: The SKU of the Redis cache to deploy.
    :type sku: ~azure.mgmt.redis.models.Sku
    :param tags: Resource tags.
    :type tags: dict[str, str]
    """

    # NOTE: auto-generated by AutoRest (see file header); prefer regenerating
    # from the REST API spec over hand-editing.
    # Maps Python attribute names to their wire-format JSON paths and types
    # for msrest (de)serialization.
    _attribute_map = {
        'redis_configuration': {'key': 'properties.redisConfiguration', 'type': '{str}'},
        'enable_non_ssl_port': {'key': 'properties.enableNonSslPort', 'type': 'bool'},
        'tenant_settings': {'key': 'properties.tenantSettings', 'type': '{str}'},
        'shard_count': {'key': 'properties.shardCount', 'type': 'int'},
        'minimum_tls_version': {'key': 'properties.minimumTlsVersion', 'type': 'str'},
        'sku': {'key': 'properties.sku', 'type': 'Sku'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, *, redis_configuration=None, enable_non_ssl_port: bool=None, tenant_settings=None, shard_count: int=None, minimum_tls_version=None, sku=None, tags=None, **kwargs) -> None:
        super(RedisUpdateParameters, self).__init__(**kwargs)
        self.redis_configuration = redis_configuration
        self.enable_non_ssl_port = enable_non_ssl_port
        self.tenant_settings = tenant_settings
        self.shard_count = shard_count
        self.minimum_tls_version = minimum_tls_version
        self.sku = sku
        self.tags = tags
| 50.932203 | 355 | 0.669884 |
1df7e947c124e8c636fa1dc9ec24312bbbbedcd3 | 2,967 | py | Python | alx/topk.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-13T21:48:52.000Z | 2022-03-13T21:48:52.000Z | alx/topk.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | null | null | null | alx/topk.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for approximate TopK."""
import jax
from jax import numpy as jnp
@jax.vmap
def slice_2d(x, y):
  """Row-wise gather: for each row of x, returns the entries indexed by y."""
  return x[y]


def top_k_approx(scores, k=100):
  """Returns approximate topk highest scores for each row.

  The api is same as jax.lax.top_k, so this can be used as a drop in replacement
  as long as num dims of scores tensor is 2. For more dimensions, please use one
  or more vmap(s) to be able to use it.

  In essence, we perform jnp.max operation, which can be thought of as
  lossy top 1, on fixed length window of items. We can control the amound of
  approximation by changing the window length. Smaller it gets, the
  approximation gets better but at the cost of performance.

  Once we have the max for all the windows, we apply regular slow but exact
  jax.lax.top_k over reduced set of items.

  Args:
    scores: [num_rows, num_cols] shaped tensor. Will return top K over last dim.
    k: How many top scores to return for each row.

  Returns:
    Topk scores, topk ids. Both shaped [num_rows, k]
  """
  num_queries = scores.shape[0]
  num_items = scores.shape[1]
  # Make this bigger to improve recall. Should be between [1, k].
  num_windows_multiplier = 5
  window_lengths = num_items // k // num_windows_multiplier + 1
  padded_num_items = k * num_windows_multiplier * window_lengths
  print(f"scores shape: {scores.shape}")
  print(f"padded_num_items: {padded_num_items}")
  print(f"num_items: {num_items}")
  # Pad with -inf so padding can never win a window max. (-jnp.inf replaces
  # the jnp.NINF alias, which was removed from recent JAX releases alongside
  # NumPy 2.0's removal of np.NINF; the value is identical.)
  scores = jnp.pad(
      scores, ((0, 0), (0, padded_num_items - num_items)),
      mode="constant",
      constant_values=-jnp.inf)
  scores = jnp.reshape(
      scores, (num_queries, k * num_windows_multiplier, window_lengths))
  # Lossy top-1 per window.
  approx_top_local_scores = jnp.max(scores, axis=2)
  # Exact descending sort over the reduced set of per-window maxima.
  sorted_approx_top_scores_across_local = jnp.flip(
      jnp.sort(approx_top_local_scores, axis=1), axis=1)
  approx_top_ids_across_local = jnp.flip(
      jnp.argsort(approx_top_local_scores, axis=1), axis=1)[:, :k]
  # Recover each winning item's index in the padded row: its position within
  # the window plus the window's starting offset.
  approx_top_local_ids = jnp.argmax(scores, axis=2)
  offsets = jnp.arange(0, padded_num_items, window_lengths)
  approx_top_ids_with_offsets = approx_top_local_ids + offsets
  approx_top_ids = slice_2d(approx_top_ids_with_offsets,
                            approx_top_ids_across_local)
  topk_scores = sorted_approx_top_scores_across_local[:, :k]
  topk_ids = approx_top_ids
  return topk_scores, topk_ids
| 35.321429 | 80 | 0.734749 |
a2b2890cf63271f0551d9e024ef4e015e258268c | 640 | py | Python | tests/test_pdf_example.py | simonthor/zfit-physics | b7702da4182812925bf53038de438f4d90168bc3 | [
"BSD-3-Clause"
] | 7 | 2019-03-31T17:04:36.000Z | 2021-04-13T10:29:25.000Z | tests/test_pdf_example.py | simonthor/zfit-physics | b7702da4182812925bf53038de438f4d90168bc3 | [
"BSD-3-Clause"
] | 7 | 2019-05-23T09:59:05.000Z | 2021-09-13T20:49:51.000Z | tests/test_pdf_example.py | simonthor/zfit-physics | b7702da4182812925bf53038de438f4d90168bc3 | [
"BSD-3-Clause"
] | 2 | 2020-02-06T03:23:38.000Z | 2021-03-06T18:01:22.000Z | """Example test for a pdf or function."""
import zfit
from zfit.core.testing import tester
import zfit_physics as zphys
# specify globals here. Do NOT add any TensorFlow but just pure python
param1_true = 0.3
param2_true = 1.2
def test_special_property1():
    """Placeholder for pdf-specific property checks (currently always passes)."""
    special_property_holds = True  # test special properties here
    assert special_property_holds
# register the pdf here and provide sets of working parameter configurations
def gauss_params_factory():
    """Builds a fresh {mu, sigma} parameter set for the Gauss pdf under test."""
    mu = zfit.Parameter("mu", param1_true)
    sigma = zfit.Parameter("sigma", param2_true)
    return {"mu": mu, "sigma": sigma}

# Register Gauss with the test harness so it is exercised with the factory's
# parameter configuration.
tester.register_pdf(pdf_class=zfit.pdf.Gauss, params_factories=gauss_params_factory)
| 22.857143 | 84 | 0.75 |
8e51ea28cbf72fc7e582baa9bad6eae9aaeb9706 | 1,429 | py | Python | examples/custom-scripts/h264-videowriter-test.py | aviogit/depthai-python | ffeb646dff0819177b09f0dd8eb9720b154e7845 | [
"MIT"
] | null | null | null | examples/custom-scripts/h264-videowriter-test.py | aviogit/depthai-python | ffeb646dff0819177b09f0dd8eb9720b154e7845 | [
"MIT"
] | null | null | null | examples/custom-scripts/h264-videowriter-test.py | aviogit/depthai-python | ffeb646dff0819177b09f0dd8eb9720b154e7845 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import cv2
# Transcode a raw H.265 elementary stream into an MP4 container via OpenCV.
in_fn = '../video.h265'
out_fn = 'video.mp4'
in_cap = cv2.VideoCapture(in_fn)
# 'AVC1' fourcc requests H.264 encoding; output is 3840x2160 at 30 fps.
out_cap = cv2.VideoWriter(out_fn, cv2.VideoWriter.fourcc('A','V','C','1'), 30, (3840, 2160))
#out_cap = cv2.VideoWriter(out_fn, 0x21, 30, (3840, 2160))
# NOTE(review): this loop never checks `inret` and never breaks, so it will
# raise on `inframe.shape` once the input stream is exhausted — presumably
# this throwaway test script relies on that to terminate; confirm.
while True:
    inret, inframe = in_cap.read()
    print(inret)
    insize = (inframe.shape[1], inframe.shape[0])
    print(inret, insize)
    out_cap.write(inframe)
# The triple-quoted block below is a no-op module-level string kept as a
# C++ usage reference for cv::VideoWriter; it is never executed.
'''
#include <iostream> // for standard I/O
#include <string> // for strings
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat)
#include <opencv2/highgui/highgui.hpp> // Video write
using namespace std;
using namespace cv;
int main()
{
    VideoWriter outputVideo; // For writing the video
    int width = ...; // Declare width here
    int height = ...; // Declare height here
    Size S = Size(width, height); // Declare Size structure
    // Open up the video for writing
    const string filename = ...; // Declare name of file here
    // Declare FourCC code - OpenCV 2.x
    // int fourcc = CV_FOURCC('H','2','6','4');
    // Declare FourCC code - OpenCV 3.x and beyond
    int fourcc = VideoWriter::fourcc('H','2','6','4');
    // Declare FPS here
    double fps = ...;
    outputVideo.open(filename, fourcc, fps, S);
    // Put your processing code here
    // ...
    // Logic to write frames here... see below for more details
    // ...
    return 0;
}
'''
c768c02418dcc25fec5cf52a15e0a57b4fbc8617 | 32 | py | Python | sequential.py | vector8188/AlgorithmAnalysisPython | 026ca8bf846a504c5eae1677680306b0462b49b9 | [
"MIT"
] | 1 | 2018-02-01T21:54:48.000Z | 2018-02-01T21:54:48.000Z | sequential.py | vector8188/AlgorithmAnalysisPython | 026ca8bf846a504c5eae1677680306b0462b49b9 | [
"MIT"
] | null | null | null | sequential.py | vector8188/AlgorithmAnalysisPython | 026ca8bf846a504c5eae1677680306b0462b49b9 | [
"MIT"
] | null | null | null | def binary_search(alist,item):
| 16 | 30 | 0.78125 |
fc9e7624fa62bb4e6ada49d59212acd39c205d79 | 118,561 | py | Python | src/sqlfluff/dialects/dialect_tsql.py | kiri1701/sqlfluff | 93d109d87f327037efe6fa30f2f7eea8d44f7e91 | [
"MIT"
] | null | null | null | src/sqlfluff/dialects/dialect_tsql.py | kiri1701/sqlfluff | 93d109d87f327037efe6fa30f2f7eea8d44f7e91 | [
"MIT"
] | null | null | null | src/sqlfluff/dialects/dialect_tsql.py | kiri1701/sqlfluff | 93d109d87f327037efe6fa30f2f7eea8d44f7e91 | [
"MIT"
] | null | null | null | """The MSSQL T-SQL dialect.
https://docs.microsoft.com/en-us/sql/t-sql/language-elements/language-elements-transact-sql
"""
from sqlfluff.core.parser import (
BaseSegment,
Sequence,
OneOf,
Bracketed,
Ref,
Nothing,
RegexLexer,
CodeSegment,
RegexParser,
Delimited,
Matchable,
NamedParser,
OptionallyBracketed,
Dedent,
BaseFileSegment,
Indent,
AnyNumberOf,
CommentSegment,
SegmentGenerator,
Conditional,
)
from sqlfluff.core.dialects import load_raw_dialect
from sqlfluff.dialects.dialect_tsql_keywords import (
RESERVED_KEYWORDS,
UNRESERVED_KEYWORDS,
)
from sqlfluff.core.parser.segments.raw import NewlineSegment, WhitespaceSegment
from sqlfluff.dialects import dialect_ansi as ansi
ansi_dialect = load_raw_dialect("ansi")
tsql_dialect = ansi_dialect.copy_as("tsql")
tsql_dialect.sets("reserved_keywords").clear()
tsql_dialect.sets("unreserved_keywords").clear()
tsql_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS)
tsql_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS)
# Set the datetime units
tsql_dialect.sets("datetime_units").clear()
tsql_dialect.sets("datetime_units").update(
[
"D",
"DAY",
"DAYOFYEAR",
"DD",
"DW",
"DY",
"HH",
"HOUR",
"M",
"MCS",
"MI",
"MICROSECOND",
"MILLISECOND",
"MINUTE",
"MM",
"MONTH",
"MS",
"N",
"NANOSECOND",
"NS",
"Q",
"QQ",
"QUARTER",
"S",
"SECOND",
"SS",
"W",
"WEEK",
"WEEKDAY",
"WK",
"WW",
"YEAR",
"Y",
"YY",
"YYYY",
]
)
tsql_dialect.sets("date_part_function_name").clear()
tsql_dialect.sets("date_part_function_name").update(
["DATEADD", "DATEDIFF", "DATEDIFF_BIG", "DATENAME"]
)
tsql_dialect.insert_lexer_matchers(
[
RegexLexer(
"atsign",
r"[@][a-zA-Z0-9_]+",
CodeSegment,
),
RegexLexer(
"var_prefix",
r"[$][a-zA-Z0-9_]+",
CodeSegment,
),
RegexLexer(
"square_quote",
r"\[([^\[\]]*)*\]",
CodeSegment,
),
# T-SQL unicode strings
RegexLexer("single_quote_with_n", r"N'([^']|'')*'", CodeSegment),
RegexLexer(
"hash_prefix",
r"[#][#]?[a-zA-Z0-9_]+",
CodeSegment,
),
],
before="back_quote",
)
tsql_dialect.patch_lexer_matchers(
[
# Patching single_quote to allow for TSQL-style escaped quotes
RegexLexer("single_quote", r"'([^']|'')*'", CodeSegment),
# Patching comments to remove hash comments
RegexLexer(
"inline_comment",
r"(--)[^\n]*",
CommentSegment,
segment_kwargs={"trim_start": ("--")},
),
# Patching block comments to account for nested blocks.
# N.B. this syntax is only possible via the non-standard-library
# (but still backwards compatible) `regex` package.
# https://pypi.org/project/regex/
# Pattern breakdown:
# /\* Match opening slash.
# (?> Atomic grouping
# (https://www.regular-expressions.info/atomic.html).
# [^*/]+ Non forward-slash or asterisk characters.
# |\*(?!\/) Negative lookahead assertion to match
# asterisks not followed by a forward-slash.
# |/[^*] Match lone forward-slashes not followed by an asterisk.
# )* Match any number of the atomic group contents.
# (?>
# (?R) Recusively match the block comment pattern
# to match nested block comments.
# (?>
# [^*/]+
# |\*(?!\/)
# |/[^*]
# )*
# )*
# \*/ Match closing slash.
RegexLexer(
"block_comment",
r"/\*(?>[^*/]+|\*(?!\/)|/[^*])*(?>(?R)(?>[^*/]+|\*(?!\/)|/[^*])*)*\*/",
CommentSegment,
subdivider=RegexLexer(
"newline",
r"\r\n|\n",
NewlineSegment,
),
trim_post_subdivide=RegexLexer(
"whitespace",
r"[^\S\r\n]+",
WhitespaceSegment,
),
),
RegexLexer(
"code", r"[0-9a-zA-Z_#@]+", CodeSegment
), # overriding to allow hash mark and at-sign in code
]
)
tsql_dialect.add(
BracketedIdentifierSegment=NamedParser(
"square_quote", CodeSegment, name="quoted_identifier", type="identifier"
),
HashIdentifierSegment=NamedParser(
"hash_prefix", CodeSegment, name="hash_identifier", type="identifier"
),
VariableIdentifierSegment=NamedParser(
"var_prefix", CodeSegment, name="variable_identifier", type="identifier"
),
BatchDelimiterGrammar=Ref("GoStatementSegment"),
QuotedLiteralSegmentWithN=NamedParser(
"single_quote_with_n", CodeSegment, name="quoted_literal", type="literal"
),
TransactionGrammar=OneOf(
"TRANSACTION",
"TRAN",
),
SystemVariableSegment=RegexParser(
r"@@[A-Za-z0-9_]+", CodeSegment, name="system_variable", type="system_variable"
),
StatementAndDelimiterGrammar=Sequence(
Ref("StatementSegment"),
Ref("DelimiterGrammar", optional=True),
),
OneOrMoreStatementsGrammar=AnyNumberOf(
Ref("StatementAndDelimiterGrammar"),
min_times=1,
),
)
tsql_dialect.replace(
# Overriding to cover TSQL allowed identifier name characters
# https://docs.microsoft.com/en-us/sql/relational-databases/databases/database-identifiers?view=sql-server-ver15
NakedIdentifierSegment=SegmentGenerator(
# Generate the anti template from the set of reserved keywords
lambda dialect: RegexParser(
r"[A-Z_][A-Z0-9_@$#]*",
CodeSegment,
name="naked_identifier",
type="identifier",
anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$",
)
),
# Overring ANSI BaseExpressionElement to remove Interval Expression Segment
BaseExpressionElementGrammar=OneOf(
Ref("LiteralGrammar"),
Ref("BareFunctionSegment"),
Ref("FunctionSegment"),
Ref("ColumnReferenceSegment"),
Ref("ExpressionSegment"),
),
SingleIdentifierGrammar=OneOf(
Ref("NakedIdentifierSegment"),
Ref("QuotedIdentifierSegment"),
Ref("BracketedIdentifierSegment"),
Ref("HashIdentifierSegment"),
Ref("ParameterNameSegment"),
Ref("VariableIdentifierSegment"),
),
LiteralGrammar=OneOf(
Ref("QuotedLiteralSegment"),
Ref("QuotedLiteralSegmentWithN"),
Ref("NumericLiteralSegment"),
Ref("BooleanLiteralGrammar"),
Ref("QualifiedNumericLiteralSegment"),
# NB: Null is included in the literals, because it is a keyword which
# can otherwise be easily mistaken for an identifier.
Ref("NullLiteralSegment"),
Ref("DateTimeLiteralGrammar"),
Ref("ParameterNameSegment"),
Ref("SystemVariableSegment"),
),
ParameterNameSegment=RegexParser(
r"@[A-Za-z0-9_]+", CodeSegment, name="parameter", type="parameter"
),
FunctionParameterGrammar=Sequence(
Ref("ParameterNameSegment", optional=True),
Sequence("AS", optional=True),
Ref("DatatypeSegment"),
Sequence(Ref("EqualsSegment"), Ref("ExpressionSegment"), optional=True),
),
FunctionNameIdentifierSegment=SegmentGenerator(
# Generate the anti template from the set of reserved keywords
# minus the function names that are reserved words.
lambda dialect: RegexParser(
r"[A-Z][A-Z0-9_]*|\[[A-Z][A-Z0-9_]*\]",
CodeSegment,
name="function_name_identifier",
type="function_name_identifier",
anti_template=r"^("
+ r"|".join(
dialect.sets("reserved_keywords")
- {
"COALESCE",
"CONVERT",
"CURRENT_TIMESTAMP",
"CURRENT_USER",
"LEFT",
"NULLIF",
"RIGHT",
"SESSION_USER",
"SYSTEM_USER",
}
)
+ r")$",
)
),
# Override ANSI IsClauseGrammar to remove TSQL non-keyword NAN
IsClauseGrammar=OneOf(
"NULL",
Ref("BooleanLiteralGrammar"),
),
DatatypeIdentifierSegment=SegmentGenerator(
# Generate the anti template reserved keywords
lambda dialect: RegexParser(
r"[A-Z][A-Z0-9_]*|\[[A-Z][A-Z0-9_]*\]",
CodeSegment,
name="data_type_identifier",
type="data_type_identifier",
# anti_template=r"^(NOT)$",
anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$",
# TODO - this is a stopgap until we implement explicit data types
),
),
PrimaryKeyGrammar=Sequence(
OneOf(
Sequence(
"PRIMARY",
"KEY",
),
"UNIQUE",
),
OneOf(
"CLUSTERED",
"NONCLUSTERED",
optional=True,
),
),
# Overriding SelectClauseSegmentGrammar to remove Delimited logic which assumes
# statements have been delimited
SelectClauseSegmentGrammar=Sequence(
"SELECT",
Ref("SelectClauseModifierSegment", optional=True),
Indent,
Delimited(
Ref("SelectClauseElementSegment"),
),
# NB: The Dedent for the indent above lives in the
# SelectStatementSegment so that it sits in the right
# place corresponding to the whitespace.
),
FromClauseTerminatorGrammar=OneOf(
"WHERE",
"LIMIT",
Sequence("GROUP", "BY"),
Sequence("ORDER", "BY"),
"HAVING",
"PIVOT",
"UNPIVOT",
Ref("SetOperatorSegment"),
Ref("WithNoSchemaBindingClauseSegment"),
Ref("DelimiterGrammar"),
),
# Replace ANSI LikeGrammar to remove TSQL non-keywords RLIKE and ILIKE
LikeGrammar=Sequence(
"LIKE",
),
# Replace ANSI FunctionContentsGrammar to remove TSQL non-keyword Separator
# TODO: fully represent TSQL functionality
FunctionContentsGrammar=AnyNumberOf(
Ref("ExpressionSegment"),
# A Cast-like function
Sequence(Ref("ExpressionSegment"), "AS", Ref("DatatypeSegment")),
# An extract-like or substring-like function
Sequence(
OneOf(Ref("DatetimeUnitSegment"), Ref("ExpressionSegment")),
"FROM",
Ref("ExpressionSegment"),
),
Sequence(
# Allow an optional distinct keyword here.
Ref.keyword("DISTINCT", optional=True),
OneOf(
# Most functions will be using the delimited route
# but for COUNT(*) or similar we allow the star segment
# here.
Ref("StarSegment"),
Delimited(Ref("FunctionContentsExpressionGrammar")),
),
),
Ref("OrderByClauseSegment"),
# used by string_agg (postgres), group_concat (exasol),listagg (snowflake)...
# like a function call: POSITION ( 'QL' IN 'SQL')
Sequence(
OneOf(
Ref("QuotedLiteralSegment"),
Ref("SingleIdentifierGrammar"),
Ref("ColumnReferenceSegment"),
),
"IN",
OneOf(
Ref("QuotedLiteralSegment"),
Ref("SingleIdentifierGrammar"),
Ref("ColumnReferenceSegment"),
),
),
Sequence(OneOf("IGNORE", "RESPECT"), "NULLS"),
),
JoinKeywordsGrammar=OneOf("JOIN", "APPLY", Sequence("OUTER", "APPLY")),
NaturalJoinKeywordsGrammar=Nothing(),
NestedJoinGrammar=Sequence(
Indent,
Ref("JoinClauseSegment"),
Dedent,
),
# Replace Expression_D_Grammar to remove casting syntax invalid in TSQL
Expression_D_Grammar=Sequence(
OneOf(
Ref("BareFunctionSegment"),
Ref("FunctionSegment"),
Bracketed(
OneOf(
# We're using the expression segment here rather than the grammar so
# that in the parsed structure we get nested elements.
Ref("ExpressionSegment"),
Ref("SelectableGrammar"),
Delimited(
Ref(
"ColumnReferenceSegment"
), # WHERE (a,b,c) IN (select a,b,c FROM...)
Ref(
"FunctionSegment"
), # WHERE (a, substr(b,1,3)) IN (select c,d FROM...)
Ref("LiteralGrammar"), # WHERE (a, 2) IN (SELECT b, c FROM ...)
),
ephemeral_name="BracketedExpression",
),
),
# Allow potential select statement without brackets
Ref("SelectStatementSegment"),
Ref("LiteralGrammar"),
Ref("ColumnReferenceSegment"),
Sequence(
Ref("SimpleArrayTypeGrammar", optional=True), Ref("ArrayLiteralSegment")
),
),
Ref("Accessor_Grammar", optional=True),
allow_gaps=True,
),
MergeIntoLiteralGrammar=Sequence(
"MERGE",
Sequence(
"TOP",
OptionallyBracketed(Ref("ExpressionSegment")),
Ref.keyword("PERCENT", optional=True),
optional=True,
),
Ref.keyword("INTO", optional=True),
),
TrimParametersGrammar=Nothing(),
TemporaryGrammar=Nothing(),
)
class StatementSegment(ansi.StatementSegment):
"""Overriding StatementSegment to allow for additional segment parsing."""
match_grammar = ansi.StatementSegment.parse_grammar.copy(
insert=[
Ref("IfExpressionStatement"),
Ref("DeclareStatementSegment"),
Ref("DeclareCursorStatementSegment"),
Ref("SetStatementSegment"),
Ref("AlterTableSwitchStatementSegment"),
Ref("PrintStatementSegment"),
Ref(
"CreateTableAsSelectStatementSegment"
), # Azure Synapse Analytics specific
Ref("RenameStatementSegment"), # Azure Synapse Analytics specific
Ref("ExecuteScriptSegment"),
Ref("DropStatisticsStatementSegment"),
Ref("DropProcedureStatementSegment"),
Ref("UpdateStatisticsStatementSegment"),
Ref("BeginEndSegment"),
Ref("TryCatchSegment"),
Ref("MergeStatementSegment"),
Ref("ThrowStatementSegment"),
Ref("RaiserrorStatementSegment"),
Ref("ReturnStatementSegment"),
Ref("GotoStatement"),
Ref("LabelStatementSegment"),
Ref("DisableTriggerStatementSegment"),
Ref("WhileExpressionStatement"),
Ref("BreakStatement"),
Ref("ContinueStatement"),
Ref("WaitForStatementSegment"),
Ref("OpenCursorStatementSegment"),
Ref("CloseCursorStatementSegment"),
Ref("DeallocateCursorStatementSegment"),
Ref("FetchCursorStatementSegment"),
Ref("CreateTypeStatementSegment"),
],
remove=[
Ref("CreateModelStatementSegment"),
Ref("DropModelStatementSegment"),
Ref("DescribeStatementSegment"),
],
)
parse_grammar = match_grammar
class GreaterThanOrEqualToSegment(BaseSegment):
    """Greater than or equal to operator.

    N.B. Patching to add !< and
    to allow spaces between operators.
    """

    type = "comparison_operator"
    name = "greater_than_equal_to"
    match_grammar = OneOf(
        # Standard form: ">="
        Sequence(
            Ref("RawGreaterThanSegment"),
            Ref("RawEqualsSegment"),
        ),
        # T-SQL form: "!<" ("not less than" is equivalent to ">=")
        Sequence(
            Ref("RawNotSegment"),
            Ref("RawLessThanSegment"),
        ),
    )
class LessThanOrEqualToSegment(BaseSegment):
    """Less than or equal to operator.

    N.B. Patching to add !> and
    to allow spaces between operators.
    """

    type = "comparison_operator"
    name = "less_than_equal_to"
    match_grammar = OneOf(
        # Standard form: "<="
        Sequence(
            Ref("RawLessThanSegment"),
            Ref("RawEqualsSegment"),
        ),
        # T-SQL form: "!>" ("not greater than" is equivalent to "<=")
        Sequence(
            Ref("RawNotSegment"),
            Ref("RawGreaterThanSegment"),
        ),
    )
class NotEqualToSegment(BaseSegment):
    """Not equal to operator.

    N.B. Patching to allow spaces between operators.
    """

    type = "comparison_operator"
    name = "not_equal_to"
    match_grammar = OneOf(
        # C-style form: "!="
        Sequence(Ref("RawNotSegment"), Ref("RawEqualsSegment")),
        # ANSI form: "<>"
        Sequence(Ref("RawLessThanSegment"), Ref("RawGreaterThanSegment")),
    )
class SelectClauseElementSegment(ansi.SelectClauseElementSegment):
"""An element in the targets of a select statement.
Overriding ANSI to remove GreedyUntil logic which assumes statements have been
delimited
"""
# Important to split elements before parsing, otherwise debugging is really hard.
match_grammar = OneOf(
# *, blah.*, blah.blah.*, etc.
Ref("WildcardExpressionSegment"),
Sequence(
Ref("AltAliasExpressionSegment"),
Ref("BaseExpressionElementGrammar"),
),
Sequence(
Ref("BaseExpressionElementGrammar"),
Ref("AliasExpressionSegment", optional=True),
),
)
parse_grammar = None
class AltAliasExpressionSegment(BaseSegment):
    """An alternative alias clause as used by tsql using `=`.

    Matches the `alias =` prefix of T-SQL's `alias = expression`
    select-list form (the expression itself is matched by the caller).
    """

    type = "alias_expression"
    match_grammar = Sequence(
        OneOf(
            Ref("SingleIdentifierGrammar"),
            # T-SQL also permits a single-quoted string as the alias.
            Ref("SingleQuotedIdentifierSegment"),
        ),
        Ref("RawEqualsSegment"),
    )
class SelectClauseModifierSegment(BaseSegment):
    """Things that come after SELECT but before the columns."""

    type = "select_clause_modifier"
    match_grammar = OneOf(
        "DISTINCT",
        "ALL",
        Sequence(
            # https://docs.microsoft.com/en-us/sql/t-sql/queries/top-transact-sql?view=sql-server-ver15
            "TOP",
            # The row count may appear with or without parentheses.
            OptionallyBracketed(Ref("ExpressionSegment")),
            Sequence("PERCENT", optional=True),
            Sequence("WITH", "TIES", optional=True),
        ),
    )
class SelectClauseSegment(BaseSegment):
    """A group of elements in a select target statement.

    Overriding ANSI to remove StartsWith logic which assumes statements have
    been delimited (T-SQL statement delimiters are optional).
    """

    type = "select_clause"
    match_grammar = Ref("SelectClauseSegmentGrammar")
class UnorderedSelectStatementSegment(BaseSegment):
"""A `SELECT` statement without any ORDER clauses or later.
We need to change ANSI slightly to remove LimitClauseSegment
and NamedWindowSegment which don't exist in T-SQL.
We also need to get away from ANSI's use of StartsWith.
There's not a clean list of terminators that can be used
to identify the end of a TSQL select statement. Semi-colon is optional.
"""
type = "select_statement"
match_grammar = Sequence(
Ref("SelectClauseSegment"),
# Dedent for the indent in the select clause.
# It's here so that it can come AFTER any whitespace.
Dedent,
Ref("IntoTableSegment", optional=True),
Ref("FromClauseSegment", optional=True),
Ref("PivotUnpivotStatementSegment", optional=True),
Ref("WhereClauseSegment", optional=True),
Ref("GroupByClauseSegment", optional=True),
Ref("HavingClauseSegment", optional=True),
)
class InsertStatementSegment(BaseSegment):
"""An `INSERT` statement.
Overriding ANSI definition to remove StartsWith logic that doesn't handle optional
delimitation well.
"""
type = "insert_statement"
match_grammar = Sequence(
"INSERT",
Ref.keyword("INTO", optional=True),
Ref("TableReferenceSegment"),
Ref("PostTableExpressionGrammar", optional=True),
Ref("BracketedColumnReferenceListGrammar", optional=True),
Ref("OutputClauseSegment", optional=True),
OneOf(Ref("SelectableGrammar"), Ref("ExecuteScriptSegment")),
)
class WithCompoundStatementSegment(BaseSegment):
"""A `SELECT` statement preceded by a selection of `WITH` clauses.
`WITH tab (col1,col2) AS (SELECT a,b FROM x)`
Overriding ANSI to remove the greedy matching of StartsWith().
"""
type = "with_compound_statement"
# match grammar
match_grammar = Sequence(
"WITH",
Ref.keyword("RECURSIVE", optional=True),
Conditional(Indent, indented_ctes=True),
Delimited(
Ref("CTEDefinitionSegment"),
terminator=Ref.keyword("SELECT"),
),
Conditional(Dedent, indented_ctes=True),
OneOf(
Ref("NonWithSelectableGrammar"),
Ref("NonWithNonSelectableGrammar"),
Ref("MergeStatementSegment"),
),
)
class SelectStatementSegment(BaseSegment):
"""A `SELECT` statement.
We need to change ANSI slightly to remove LimitClauseSegment
and NamedWindowSegment which don't exist in T-SQL.
We also need to get away from ANSI's use of StartsWith.
There's not a clean list of terminators that can be used
to identify the end of a TSQL select statement. Semi-colon is optional.
"""
type = "select_statement"
# Remove the Limit and Window statements from ANSI
match_grammar = UnorderedSelectStatementSegment.match_grammar.copy(
insert=[
Ref("OrderByClauseSegment", optional=True),
Ref("OptionClauseSegment", optional=True),
Ref("DelimiterGrammar", optional=True),
Ref("ForXmlSegment", optional=True),
]
)
class IntoTableSegment(BaseSegment):
    """`INTO` clause within `SELECT`.

    https://docs.microsoft.com/en-us/sql/t-sql/queries/select-into-clause-transact-sql?view=sql-server-ver15
    """

    type = "into_table_clause"
    match_grammar = Sequence("INTO", Ref("ObjectReferenceSegment"))
class WhereClauseSegment(BaseSegment):
    """A `WHERE` clause like in `SELECT` or `INSERT`.

    Overriding ANSI in order to get away from the use of
    StartsWith. There's not a clean list of terminators that can be used
    to identify the end of a TSQL select statement. Semi-colon is optional.
    """

    type = "where_clause"
    match_grammar = Sequence(
        "WHERE",
        Indent,
        # The condition itself may be wrapped in parentheses.
        OptionallyBracketed(Ref("ExpressionSegment")),
        Dedent,
    )
class CreateIndexStatementSegment(BaseSegment):
"""A `CREATE INDEX` or `CREATE STATISTICS` statement.
https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15
https://docs.microsoft.com/en-us/sql/t-sql/statements/create-statistics-transact-sql?view=sql-server-ver15
"""
type = "create_index_statement"
match_grammar = Sequence(
"CREATE",
Indent,
Ref("OrReplaceGrammar", optional=True),
Sequence("UNIQUE", optional=True),
OneOf("CLUSTERED", "NONCLUSTERED", optional=True),
OneOf("INDEX", "STATISTICS"),
Ref("IfNotExistsGrammar", optional=True),
Ref("IndexReferenceSegment"),
"ON",
Ref("TableReferenceSegment"),
Ref("BracketedIndexColumnListGrammar"),
Sequence(
"INCLUDE",
Ref("BracketedColumnReferenceListGrammar"),
optional=True,
),
Ref("WhereClauseSegment", optional=True),
Ref("RelationalIndexOptionsSegment", optional=True),
Ref("OnPartitionOrFilegroupOptionSegment", optional=True),
Ref("FilestreamOnOptionSegment", optional=True),
Ref("DelimiterGrammar", optional=True),
Dedent,
)
class OnPartitionOrFilegroupOptionSegment(BaseSegment):
    """ON partition scheme or filegroup option.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15
    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
    """

    type = "on_partition_or_filegroup_statement"
    match_grammar = OneOf(
        Ref("PartitionSchemeClause"),
        Ref("FilegroupClause"),
        Ref("LiteralGrammar"),  # for "default" value
    )
class FilestreamOnOptionSegment(BaseSegment):
    """FILESTREAM_ON index option in `CREATE INDEX` and 'CREATE TABLE' statements.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15
    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
    """

    type = "filestream_on_option_statement"
    match_grammar = Sequence(
        "FILESTREAM_ON",
        OneOf(
            Ref("FilegroupNameSegment"),
            Ref("PartitionSchemeNameSegment"),
            OneOf(
                "NULL",
                Ref("LiteralGrammar"),  # for "default" value
            ),
        ),
    )
class TextimageOnOptionSegment(BaseSegment):
    """TEXTIMAGE ON option in `CREATE TABLE` statement.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
    """

    type = "textimage_on_option_statement"
    match_grammar = Sequence(
        "TEXTIMAGE_ON",
        OneOf(
            Ref("FilegroupNameSegment"),
            Ref("LiteralGrammar"),  # for "default" value
        ),
    )
class ReferencesConstraintGrammar(BaseSegment):
"""REFERENCES constraint option in `CREATE TABLE` statement.
https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
"""
type = "references_constraint_grammar"
match_grammar = Sequence(
# REFERENCES reftable [ ( refcolumn) ]
"REFERENCES",
Ref("TableReferenceSegment"),
# Foreign columns making up FOREIGN KEY constraint
Ref("BracketedColumnReferenceListGrammar", optional=True),
Sequence(
"ON",
"DELETE",
OneOf(
Sequence("NO", "ACTION"),
"CASCADE",
Sequence("SET", "NULL"),
Sequence("SET", "DEFAULT"),
),
optional=True,
),
Sequence(
"ON",
"UPDATE",
OneOf(
Sequence("NO", "ACTION"),
"CASCADE",
Sequence("SET", "NULL"),
Sequence("SET", "DEFAULT"),
),
optional=True,
),
Sequence("NOT", "FOR", "REPLICATION", optional=True),
)
class CheckConstraintGrammar(BaseSegment):
    """CHECK constraint option in `CREATE TABLE` statement.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
    """

    type = "check_constraint_grammar"
    match_grammar = Sequence(
        "CHECK",
        Sequence("NOT", "FOR", "REPLICATION", optional=True),
        # The check condition is always parenthesised.
        Bracketed(
            Ref("ExpressionSegment"),
        ),
    )
class RelationalIndexOptionsSegment(BaseSegment):
"""A relational index options in `CREATE INDEX` statement.
https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15
"""
type = "relational_index_options"
match_grammar = Sequence(
"WITH",
OptionallyBracketed(
Delimited(
AnyNumberOf(
Sequence(
OneOf(
"PAD_INDEX",
"FILLFACTOR",
"SORT_IN_TEMPDB",
"IGNORE_DUP_KEY",
"STATISTICS_NORECOMPUTE",
"STATISTICS_INCREMENTAL",
"DROP_EXISTING",
"RESUMABLE",
"ALLOW_ROW_LOCKS",
"ALLOW_PAGE_LOCKS",
"OPTIMIZE_FOR_SEQUENTIAL_KEY",
"MAXDOP",
),
Ref("EqualsSegment"),
OneOf(
"ON",
"OFF",
Ref("LiteralGrammar"),
),
),
Ref("MaxDurationSegment"),
Sequence(
"ONLINE",
Ref("EqualsSegment"),
OneOf(
"OFF",
Sequence(
"ON",
Bracketed(
Sequence(
"WAIT_AT_LOW_PRIORITY",
Bracketed(
Delimited(
Ref("MaxDurationSegment"),
Sequence(
"ABORT_AFTER_WAIT",
Ref("EqualsSegment"),
OneOf(
"NONE",
"SELF",
"BLOCKERS",
),
),
delimiter=Ref("CommaSegment"),
),
),
),
optional=True,
),
),
),
),
# for table constrains
Sequence(
"COMPRESSION_DELAY",
Ref("EqualsSegment"),
Ref("NumericLiteralSegment"),
Sequence(
"MINUTES",
optional=True,
),
),
Sequence(
"DATA_COMPRESSION",
Ref("EqualsSegment"),
OneOf(
"NONE",
"ROW",
"PAGE",
"COLUMNSTORE", # for table constrains
"COLUMNSTORE_ARCHIVE", # for table constrains
),
Ref("OnPartitionsSegment", optional=True),
),
min_times=1,
),
delimiter=Ref("CommaSegment"),
),
),
)
class MaxDurationSegment(BaseSegment):
    """A `MAX DURATION` clause.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15
    """

    type = "max_duration"
    match_grammar = Sequence(
        "MAX_DURATION",
        Ref("EqualsSegment"),
        Ref("NumericLiteralSegment"),
        # The MINUTES unit keyword is optional in T-SQL.
        Sequence(
            "MINUTES",
            optional=True,
        ),
    )
class DropIndexStatementSegment(ansi.DropIndexStatementSegment):
    """A `DROP INDEX` statement.

    Overriding ANSI to include the required `ON <table>` clause
    (T-SQL indexes are always dropped relative to their table).
    """

    match_grammar = Sequence(
        "DROP",
        "INDEX",
        Ref("IfExistsGrammar", optional=True),
        Ref("IndexReferenceSegment"),
        "ON",
        Ref("TableReferenceSegment"),
        Ref("DelimiterGrammar", optional=True),
    )
class DropStatisticsStatementSegment(BaseSegment):
    """A `DROP STATISTICS` statement.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/drop-statistics-transact-sql
    """

    type = "drop_statement"

    # DROP STATISTICS table.statistics_name
    # (the dotted name is matched by IndexReferenceSegment).
    match_grammar = Sequence(
        "DROP",
        "STATISTICS",
        Ref("IndexReferenceSegment"),
        Ref("DelimiterGrammar", optional=True),
    )
class UpdateStatisticsStatementSegment(BaseSegment):
    """An `UPDATE STATISTICS` statement.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/update-statistics-transact-sql?view=sql-server-ver15
    """

    type = "update_statistics_statement"
    match_grammar = Sequence(
        "UPDATE",
        "STATISTICS",
        Ref("ObjectReferenceSegment"),
        # Optionally name a single statistics object, or a bracketed
        # list of statistics objects, to update.
        OneOf(
            Ref("SingleIdentifierGrammar"),
            Bracketed(
                Delimited(
                    Ref("SingleIdentifierGrammar"),
                ),
            ),
            optional=True,
        ),
        Ref("DelimiterGrammar", optional=True),
    )
class ObjectReferenceSegment(ansi.ObjectReferenceSegment):
    """A reference to an object.

    Update ObjectReferenceSegment to only allow dot separated
    SingleIdentifierGrammar so square bracketed identifiers can be matched.
    """

    # match grammar (allow whitespace)
    match_grammar: Matchable = Sequence(
        Ref("SingleIdentifierGrammar"),
        AnyNumberOf(
            Sequence(
                Ref("DotSegment"),
                # The identifier after a dot is optional, which permits
                # T-SQL's omitted-part names such as `db..table`.
                Ref("SingleIdentifierGrammar", optional=True),
            ),
            # Up to three dots, i.e. up to four name parts
            # (server.database.schema.object).
            min_times=0,
            max_times=3,
        ),
    )
class TableReferenceSegment(ObjectReferenceSegment):
    """A reference to a table, CTE, subquery or alias.

    Overriding to capture TSQL's override of ObjectReferenceSegment.
    """

    type = "table_reference"
class SchemaReferenceSegment(ObjectReferenceSegment):
    """A reference to a schema.

    Overriding to capture TSQL's override of ObjectReferenceSegment.
    """

    type = "schema_reference"
class DatabaseReferenceSegment(ObjectReferenceSegment):
    """A reference to a database.

    Overriding to capture TSQL's override of ObjectReferenceSegment.
    """

    type = "database_reference"
class IndexReferenceSegment(ObjectReferenceSegment):
    """A reference to an index.

    Overriding to capture TSQL's override of ObjectReferenceSegment.
    """

    type = "index_reference"
class ExtensionReferenceSegment(ObjectReferenceSegment):
    """A reference to an extension.

    Overriding to capture TSQL's override of ObjectReferenceSegment.
    """

    type = "extension_reference"
class ColumnReferenceSegment(ObjectReferenceSegment):
    """A reference to a column, field or alias.

    Overriding to capture TSQL's override of ObjectReferenceSegment.
    """

    type = "column_reference"
class SequenceReferenceSegment(ObjectReferenceSegment):
    """A reference to a sequence.

    Overriding to capture TSQL's override of ObjectReferenceSegment.
    """

    type = "sequence_reference"
class PivotColumnReferenceSegment(ObjectReferenceSegment):
    """A reference to a PIVOT column.

    Used to differentiate it from a regular column reference.
    """

    type = "pivot_column_reference"
class PivotUnpivotStatementSegment(BaseSegment):
"""Declaration of a variable.
https://docs.microsoft.com/en-us/sql/t-sql/queries/from-using-pivot-and-unpivot?view=sql-server-ver15
"""
type = "from_pivot_expression"
match_grammar = Sequence(
OneOf(
Sequence(
"PIVOT",
OptionallyBracketed(
Sequence(
OptionallyBracketed(Ref("FunctionSegment")),
"FOR",
Ref("ColumnReferenceSegment"),
"IN",
Bracketed(Delimited(Ref("PivotColumnReferenceSegment"))),
)
),
),
Sequence(
"UNPIVOT",
OptionallyBracketed(
Sequence(
OptionallyBracketed(Ref("ColumnReferenceSegment")),
"FOR",
Ref("ColumnReferenceSegment"),
"IN",
Bracketed(Delimited(Ref("PivotColumnReferenceSegment"))),
)
),
),
),
Sequence("AS", optional=True),
Ref("TableReferenceSegment"),
)
class DeclareStatementSegment(BaseSegment):
"""Declaration of a variable.
https://docs.microsoft.com/en-us/sql/t-sql/language-elements/declare-local-variable-transact-sql?view=sql-server-ver15
"""
type = "declare_segment"
match_grammar = Sequence(
"DECLARE",
Indent,
Delimited(
Sequence(
Ref("ParameterNameSegment"),
Sequence("AS", optional=True),
OneOf(
Sequence(
Ref("DatatypeSegment"),
Sequence(
Ref("EqualsSegment"),
Ref("ExpressionSegment"),
optional=True,
),
),
Sequence(
"TABLE",
Bracketed(
Delimited(
OneOf(
Ref("TableConstraintSegment"),
Ref("ColumnDefinitionSegment"),
),
allow_trailing=True,
)
),
),
),
),
),
Dedent,
Ref("DelimiterGrammar", optional=True),
)
class DeclareCursorStatementSegment(BaseSegment):
    """Declaration of a cursor.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/declare-cursor-transact-sql?view=sql-server-ver15
    """

    type = "declare_segment"
    match_grammar = Sequence(
        "DECLARE",
        Ref("NakedIdentifierSegment"),
        "CURSOR",
        # Cursor scope and behaviour options; all optional and positional.
        OneOf("LOCAL", "GLOBAL", optional=True),
        OneOf("FORWARD_ONLY", "SCROLL", optional=True),
        OneOf("STATIC", "KEYSET", "DYNAMIC", "FAST_FORWARD", optional=True),
        OneOf("READ_ONLY", "SCROLL_LOCKS", "OPTIMISTIC", optional=True),
        Sequence("TYPE_WARNING", optional=True),
        "FOR",
        Ref("SelectStatementSegment"),
    )
class GoStatementSegment(BaseSegment):
    """GO signals the end of a batch of Transact-SQL statements.

    GO statements are not part of the TSQL language. They are used to signal
    batch boundaries so that clients know how batches of statements can be
    executed.
    """

    type = "go_statement"
    match_grammar = Ref.keyword("GO")
class DatatypeSegment(BaseSegment):
    """A data type segment.

    Updated for Transact-SQL to allow bracketed data types with bracketed
    schemas, e.g. `[dbo].[mytype]`.
    """

    type = "data_type"
    match_grammar = Sequence(
        # Some dialects allow optional qualification of data types with schemas
        Sequence(
            Ref("SingleIdentifierGrammar"),
            Ref("DotSegment"),
            allow_gaps=False,
            optional=True,
        ),
        OneOf(
            Ref("DatatypeIdentifierSegment"),
            # e.g. [int] — T-SQL permits square-quoted type names.
            Bracketed(Ref("DatatypeIdentifierSegment"), bracket_type="square"),
        ),
        # Optional size/precision arguments, e.g. VARCHAR(MAX), DECIMAL(18, 2).
        Bracketed(
            OneOf(
                "MAX",
                Delimited(Ref("ExpressionSegment")),
                # The brackets might be empty for some cases...
                optional=True,
            ),
            # There may be no brackets for some data types
            optional=True,
        ),
        Ref("CharCharacterSetGrammar", optional=True),
    )
class CreateSequenceOptionsSegment(BaseSegment):
    """Options for Create Sequence statement.

    Matches a single option clause; the CREATE SEQUENCE grammar applies
    this segment repeatedly.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-sequence-transact-sql?view=sql-server-ver15
    """

    type = "create_sequence_options_segment"
    match_grammar = OneOf(
        Sequence(
            "AS",
            Ref("DatatypeSegment"),
        ),
        Sequence("START", "WITH", Ref("NumericLiteralSegment")),
        Sequence("INCREMENT", "BY", Ref("NumericLiteralSegment")),
        Sequence("MINVALUE", Ref("NumericLiteralSegment")),
        Sequence("NO", "MINVALUE"),
        Sequence("MAXVALUE", Ref("NumericLiteralSegment")),
        Sequence("NO", "MAXVALUE"),
        # CYCLE / NO CYCLE
        Sequence(
            Sequence("NO", optional=True),
            "CYCLE",
        ),
        Sequence(
            "CACHE",
            Ref("NumericLiteralSegment"),
        ),
        Sequence(
            "NO",
            "CACHE",
        ),
    )
class NextValueSequenceSegment(BaseSegment):
    """Segment to get next value from a sequence.

    e.g. ``NEXT VALUE FOR my_schema.my_sequence``.
    """
    type = "sequence_next_value"
    match_grammar = Sequence(
        "NEXT",
        "VALUE",
        "FOR",
        Ref("ObjectReferenceSegment"),
    )
class IfExpressionStatement(BaseSegment):
    """IF-ELSE statement.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/if-else-transact-sql?view=sql-server-ver15
    """
    type = "if_then_statement"
    match_grammar = Sequence(
        Ref("IfClauseSegment"),
        Indent,
        Ref("StatementAndDelimiterGrammar"),
        Dedent,
        AnyNumberOf(
            # ELSE IF included explicitly to allow for correct indentation
            Sequence(
                "ELSE",
                Ref("IfClauseSegment"),
                Indent,
                Ref("StatementAndDelimiterGrammar"),
                Dedent,
            ),
        ),
        # Trailing plain ELSE branch (no condition).
        Sequence(
            "ELSE",
            Indent,
            Ref("StatementAndDelimiterGrammar"),
            Dedent,
            optional=True,
        ),
    )
class IfClauseSegment(BaseSegment):
    """IF clause.

    The keyword plus its boolean condition; the guarded statement is
    handled by ``IfExpressionStatement``.
    """
    type = "if_clause"
    match_grammar = Sequence(
        "IF",
        Indent,
        Ref("ExpressionSegment"),
        Dedent,
    )
class WhileExpressionStatement(BaseSegment):
    """WHILE statement.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/while-transact-sql?view=sql-server-ver15
    """
    type = "while_statement"
    match_grammar = Sequence(
        "WHILE",
        Ref("ExpressionSegment"),
        Indent,
        Ref("StatementAndDelimiterGrammar"),
        Dedent,
    )
class BreakStatement(BaseSegment):
    """BREAK statement.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/break-transact-sql?view=sql-server-ver15
    """
    type = "break_statement"
    match_grammar = Sequence(
        "BREAK",
    )
class ContinueStatement(BaseSegment):
    """CONTINUE statement.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/continue-transact-sql?view=sql-server-ver15
    """
    type = "continue_statement"
    match_grammar = Sequence(
        "CONTINUE",
    )
class WaitForStatementSegment(BaseSegment):
    """WAITFOR statement.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/waitfor-transact-sql?view=sql-server-ver15

    Partially implemented, lacking Receive and Get Conversation Group statements for
    now.
    """
    type = "waitfor_statement"
    match_grammar = Sequence(
        "WAITFOR",
        OneOf(
            # DELAY = relative wait, TIME = wait until a clock time.
            Sequence("DELAY", Ref("ExpressionSegment")),
            Sequence("TIME", Ref("ExpressionSegment")),
        ),
        Sequence("TIMEOUT", Ref("NumericLiteralSegment"), optional=True),
    )
class ColumnConstraintSegment(BaseSegment):
    """A column option; each CREATE TABLE column can have 0 or more."""
    type = "column_constraint_segment"
    # Column constraint from
    # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
    match_grammar = Sequence(
        # Optional named-constraint prefix for the whole option.
        Sequence(
            "CONSTRAINT",
            Ref("ObjectReferenceSegment"),  # Constraint name
            optional=True,
        ),
        OneOf(
            "FILESTREAM",
            Sequence(
                "COLLATE", Ref("ObjectReferenceSegment")
            ),  # [COLLATE collation_name]
            "SPARSE",
            # Dynamic data masking, e.g. MASKED WITH (FUNCTION = 'default()').
            Sequence(
                "MASKED",
                "WITH",
                Bracketed("FUNCTION", Ref("EqualsSegment"), Ref("LiteralGrammar")),
            ),
            Sequence(
                Sequence(
                    "CONSTRAINT",
                    Ref("ObjectReferenceSegment"),  # Constraint name
                    optional=True,
                ),
                # DEFAULT <value>
                "DEFAULT",
                OptionallyBracketed(
                    OneOf(
                        OptionallyBracketed(Ref("LiteralGrammar")),  # ((-1))
                        Ref("FunctionSegment"),
                        Ref("NextValueSequenceSegment"),
                    ),
                ),
            ),
            Ref("IdentityGrammar"),
            Sequence("NOT", "FOR", "REPLICATION"),
            # Temporal table (system-versioned) generated columns.
            Sequence(
                Sequence("GENERATED", "ALWAYS", "AS"),
                OneOf("ROW", "TRANSACTION_ID", "SEQUENCE_NUMBER"),
                OneOf("START", "END"),
                Ref.keyword("HIDDEN", optional=True),
            ),
            Sequence(Ref.keyword("NOT", optional=True), "NULL"),  # NOT NULL or NULL
            "ROWGUIDCOL",
            Ref("EncryptedWithGrammar"),
            Ref("PrimaryKeyGrammar"),
            Ref("RelationalIndexOptionsSegment"),
            Ref("OnPartitionOrFilegroupOptionSegment"),
            "UNIQUE",  # UNIQUE #can be removed as included in PrimaryKeyGrammar?
            Ref("ForeignKeyGrammar"),
            Ref("ReferencesConstraintGrammar"),
            Ref("CheckConstraintGrammar"),
            Ref("FilestreamOnOptionSegment", optional=True),
            # column_index
            Sequence(
                "INDEX",
                Ref("ObjectReferenceSegment"),  # index name
                OneOf("CLUSTERED", "NONCLUSTERED", optional=True),
                # other optional blocks (RelationalIndexOptionsSegment,
                # OnIndexOptionSegment,FilestreamOnOptionSegment) are mentioned above
            ),
            # computed_column_definition
            Sequence("AS", Ref("ExpressionSegment")),
            Sequence("PERSISTED", Sequence("NOT", "NULL", optional=True))
            # other optional blocks (RelationalIndexOptionsSegment,
            # OnIndexOptionSegment, ReferencesConstraintGrammar, CheckConstraintGrammar)
            # are mentioned above
        ),
    )
class FunctionParameterListGrammar(BaseSegment):
    """The parameters for a function ie.

    `(@city_name NVARCHAR(30), @postal_code NVARCHAR(15))`.

    Overriding ANSI (1) to optionally bracket and (2) remove Delimited
    """
    type = "function_parameter_list"
    # Function parameter list
    match_grammar = Bracketed(
        Delimited(
            Ref("FunctionParameterGrammar"),
            # The list itself may be empty: `()`.
            optional=True,
        ),
    )
class CreateFunctionStatementSegment(BaseSegment):
    """A `CREATE FUNCTION` statement.

    This version in the TSQL dialect should be a "common subset" of the
    structure of the code for those dialects.

    Updated to include AS after declaration of RETURNS. Might be integrated in ANSI
    though.

    https://www.postgresql.org/docs/9.1/sql-createfunction.html
    https://docs.snowflake.com/en/sql-reference/sql/create-function.html
    https://cloud.google.com/bigquery/docs/reference/standard-sql/user-defined-functions
    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-function-transact-sql?view=sql-server-ver15
    """
    type = "create_function_statement"
    match_grammar = Sequence(
        "CREATE",
        Sequence("OR", "ALTER", optional=True),
        "FUNCTION",
        Ref("ObjectReferenceSegment"),
        Ref("FunctionParameterListGrammar"),
        Sequence(  # Optional function return type
            "RETURNS",
            OneOf(
                # Scalar function.
                Ref("DatatypeSegment"),
                # Inline table-valued function.
                "TABLE",
                # Multi-statement table-valued function:
                # RETURNS @var TABLE (<column/constraint definitions>).
                Sequence(
                    Ref("ParameterNameSegment"),
                    "TABLE",
                    Bracketed(
                        Delimited(
                            OneOf(
                                Ref("TableConstraintSegment"),
                                Ref("ColumnDefinitionSegment"),
                            ),
                        ),
                    ),
                ),
            ),
            optional=True,
        ),
        Ref("FunctionOptionSegment", optional=True),
        "AS",
        Ref("ProcedureDefinitionGrammar"),
    )
class FunctionOptionSegment(BaseSegment):
    """A function option segment.

    The `WITH <option> [, ...]` clause of CREATE FUNCTION, e.g.
    `WITH SCHEMABINDING, RETURNS NULL ON NULL INPUT`.
    """
    type = "function_option_segment"
    match_grammar = Sequence(
        "WITH",
        AnyNumberOf(
            "ENCRYPTION",
            "SCHEMABINDING",
            # RETURNS NULL ON NULL INPUT | CALLED ON NULL INPUT
            Sequence(
                OneOf(
                    Sequence(
                        "RETURNS",
                        "NULL",
                    ),
                    "CALLED",
                ),
                "ON",
                "NULL",
                "INPUT",
            ),
            Ref("ExecuteAsClauseSegment"),
            Sequence(
                "INLINE",
                Ref("EqualsSegment"),
                OneOf(
                    "ON",
                    "OFF",
                ),
            ),
            # At least one option must follow WITH.
            min_times=1,
        ),
    )
class DropFunctionStatementSegment(BaseSegment):
    """A `DROP FUNCTION` statement.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/drop-function-transact-sql?view=sql-server-ver15
    """
    type = "drop_function_statement"
    match_grammar = Sequence(
        "DROP",
        "FUNCTION",
        Ref("IfExistsGrammar", optional=True),
        # Multiple comma-separated functions may be dropped at once.
        Delimited(Ref("FunctionNameSegment")),
        Ref("DelimiterGrammar", optional=True),
    )
class ReturnStatementSegment(BaseSegment):
    """A RETURN statement.

    The return value expression is optional (bare `RETURN` exits a procedure).
    """
    type = "return_segment"
    match_grammar = Sequence(
        "RETURN",
        Ref("ExpressionSegment", optional=True),
        Ref("DelimiterGrammar", optional=True),
    )
class ExecuteAsClauseSegment(BaseSegment):
    """An EXECUTE AS clause.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/execute-as-clause-transact-sql?view=sql-server-ver15
    """
    type = "execute_as_clause"
    match_grammar = Sequence(
        OneOf("EXEC", "EXECUTE"),
        "AS",
        OneOf(
            "CALLER",
            "SELF",
            "OWNER",
            # A specific user/login name as a quoted string.
            Ref("QuotedLiteralSegment"),
        ),
    )
class SetStatementSegment(BaseSegment):
    """A Set statement.

    Setting an already declared variable or global variable.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/set-statements-transact-sql?view=sql-server-ver15
    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/set-local-variable-transact-sql?view=sql-server-ver15
    """
    type = "set_segment"
    match_grammar = Sequence(
        "SET",
        Indent,
        Delimited(
            OneOf(
                # SET TRANSACTION ISOLATION LEVEL <level>
                Sequence(
                    "TRANSACTION",
                    "ISOLATION",
                    "LEVEL",
                    OneOf(
                        "SNAPSHOT",
                        "SERIALIZABLE",
                        Sequence(
                            "REPEATABLE",
                            "READ",
                        ),
                        Sequence(
                            "READ",
                            OneOf(
                                "COMMITTED",
                                "UNCOMMITTED",
                            ),
                        ),
                    ),
                ),
                # SET <session option> { ON | OFF | = <expression> }
                Sequence(
                    OneOf(
                        "DATEFIRST",
                        "DATEFORMAT",
                        "DEADLOCK_PRIORITY",
                        "LOCK_TIMEOUT",
                        "CONCAT_NULL_YIELDS_NULL",
                        "CURSOR_CLOSE_ON_COMMIT",
                        "FIPS_FLAGGER",
                        Sequence("IDENTITY_INSERT", Ref("TableReferenceSegment")),
                        "LANGUAGE",
                        "OFFSETS",
                        "QUOTED_IDENTIFIER",
                        "ARITHABORT",
                        "ARITHIGNORE",
                        "FMTONLY",
                        "NOCOUNT",
                        "NOEXEC",
                        "NUMERIC_ROUNDABORT",
                        "PARSEONLY",
                        "QUERY_GOVERNOR_COST_LIMIT",
                        "RESULT_SET_CACHING",  # Azure Synapse Analytics specific
                        "ROWCOUNT",
                        "TEXTSIZE",
                        "ANSI_DEFAULTS",
                        "ANSI_NULL_DFLT_OFF",
                        "ANSI_NULL_DFLT_ON",
                        "ANSI_NULLS",
                        "ANSI_PADDING",
                        "ANSI_WARNINGS",
                        "FORCEPLAN",
                        "SHOWPLAN_ALL",
                        "SHOWPLAN_TEXT",
                        "SHOWPLAN_XML",
                        Sequence(
                            "STATISTICS",
                            OneOf(
                                "IO",
                                "PROFILE",
                                "TIME",
                                "XML",
                            ),
                        ),
                        "IMPLICIT_TRANSACTIONS",
                        "REMOTE_PROC_TRANSACTIONS",
                        "XACT_ABORT",
                    ),
                    OneOf(
                        "ON",
                        "OFF",
                        Sequence(
                            Ref("EqualsSegment"),
                            Ref("ExpressionSegment"),
                        ),
                    ),
                ),
                # SET @variable <assignment-op> <expression>
                Sequence(
                    Ref("ParameterNameSegment"),
                    Ref("AssignmentOperatorSegment"),
                    Ref("ExpressionSegment"),
                ),
            ),
        ),
        Dedent,
        Ref("DelimiterGrammar", optional=True),
    )
class AssignmentOperatorSegment(BaseSegment):
    """One of the assignment operators.

    Includes simpler equals but also +=, -=, etc.
    """
    type = "assignment_operator"
    match_grammar = OneOf(
        Ref("EqualsSegment"),
        # Compound operators are lexed as two tokens, e.g. `+` then `=`;
        # allow_gaps=False ensures they are adjacent with no whitespace.
        Sequence(
            OneOf(
                Ref("PlusSegment"),
                Ref("MinusSegment"),
                Ref("DivideSegment"),
                Ref("MultiplySegment"),
                Ref("ModuloSegment"),
                Ref("BitwiseAndSegment"),
                Ref("BitwiseOrSegment"),
                Ref("BitwiseXorSegment"),
            ),
            Ref("EqualsSegment"),
            allow_gaps=False,
        ),
    )
class ProcedureParameterListGrammar(BaseSegment):
    """The parameters for a procedure ie.

    `@city_name NVARCHAR(30), @postal_code NVARCHAR(15)`.

    Unlike function parameters, procedure parameters may be unbracketed.
    """
    type = "procedure_parameter_list"
    # Function parameter list
    match_grammar = OptionallyBracketed(
        Delimited(
            Sequence(
                Ref("FunctionParameterGrammar"),
                # Output / readonly modifiers per parameter.
                OneOf("OUT", "OUTPUT", "READONLY", optional=True),
            ),
            optional=True,
        ),
    )
class CreateProcedureStatementSegment(BaseSegment):
    """A `CREATE OR ALTER PROCEDURE` statement.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-procedure-transact-sql?view=sql-server-ver15
    """
    type = "create_procedure_statement"
    match_grammar = Sequence(
        "CREATE",
        Sequence("OR", "ALTER", optional=True),
        OneOf("PROCEDURE", "PROC"),
        Ref("ObjectReferenceSegment"),
        Indent,
        Ref("ProcedureParameterListGrammar", optional=True),
        Dedent,
        "AS",
        Ref("ProcedureDefinitionGrammar"),
    )
class DropProcedureStatementSegment(BaseSegment):
    """A `DROP PROCEDURE` statement.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/drop-procedure-transact-sql?view=sql-server-ver15
    """
    type = "drop_procedure_statement"
    match_grammar = Sequence(
        "DROP",
        OneOf("PROCEDURE", "PROC"),
        Ref("IfExistsGrammar", optional=True),
        # Multiple comma-separated procedures may be dropped at once.
        Delimited(Ref("ObjectReferenceSegment")),
        Ref("DelimiterGrammar", optional=True),
    )
class ProcedureDefinitionGrammar(BaseSegment):
    """This is the body of a `CREATE OR ALTER PROCEDURE AS` statement.

    This also handles the body of a `CREATE FUNCTION AS` statement.
    """
    type = "procedure_statement"
    name = "procedure_statement"
    match_grammar = Ref("OneOrMoreStatementsGrammar")
class CreateViewStatementSegment(BaseSegment):
    """A `CREATE VIEW` statement.

    Adjusted to allow CREATE OR ALTER instead of CREATE OR REPLACE.

    # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-view-transact-sql?view=sql-server-ver15#examples
    """
    type = "create_view_statement"
    match_grammar = Sequence(
        "CREATE",
        Sequence("OR", "ALTER", optional=True),
        "VIEW",
        Ref("ObjectReferenceSegment"),
        # Optional view attributes, e.g. WITH SCHEMABINDING.
        Sequence(
            "WITH",
            Delimited("ENCRYPTION", "SCHEMABINDING", "VIEW_METADATA"),
            optional=True,
        ),
        "AS",
        OptionallyBracketed(Ref("SelectableGrammar")),
        Sequence("WITH", "CHECK", "OPTION", optional=True),
        Ref("DelimiterGrammar", optional=True),
    )
class MLTableExpressionSegment(BaseSegment):
    """An ML table expression.

    Not present in T-SQL.

    TODO: Consider whether this segment can be used to represent a PREDICT statement.
    """
    type = "ml_table_expression"
    # Nothing() never matches — this disables the ANSI grammar in T-SQL.
    match_grammar = Nothing()
class ConvertFunctionNameSegment(BaseSegment):
    """CONVERT function name segment.

    Need to be able to specify this as type function_name
    so that linting rules identify it properly
    """
    type = "function_name"
    match_grammar = OneOf("CONVERT", "TRY_CONVERT")
class CastFunctionNameSegment(BaseSegment):
    """CAST function name segment.

    Need to be able to specify this as type function_name
    so that linting rules identify it properly
    """
    type = "function_name"
    match_grammar = Sequence("CAST")
class RankFunctionNameSegment(BaseSegment):
    """Rank function name segment.

    Need to be able to specify this as type function_name
    so that linting rules identify it properly
    """
    type = "function_name"
    match_grammar = OneOf("DENSE_RANK", "NTILE", "RANK", "ROW_NUMBER")
class WithinGroupFunctionNameSegment(BaseSegment):
    """WITHIN GROUP function name segment.

    For aggregation functions that use the WITHIN GROUP clause.

    https://docs.microsoft.com/en-us/sql/t-sql/functions/string-agg-transact-sql?view=sql-server-ver15
    https://docs.microsoft.com/en-us/sql/t-sql/functions/percentile-cont-transact-sql?view=sql-server-ver15
    https://docs.microsoft.com/en-us/sql/t-sql/functions/percentile-disc-transact-sql?view=sql-server-ver15

    Need to be able to specify this as type function_name
    so that linting rules identify it properly
    """
    type = "function_name"
    match_grammar = OneOf(
        "STRING_AGG",
        "PERCENTILE_CONT",
        "PERCENTILE_DISC",
    )
class WithinGroupClause(BaseSegment):
    """WITHIN GROUP clause.

    For a small set of aggregation functions.

    https://docs.microsoft.com/en-us/sql/t-sql/functions/string-agg-transact-sql?view=sql-server-ver15
    https://docs.microsoft.com/en-us/sql/t-sql/functions/percentile-cont-transact-sql?view=sql-server-ver15
    """
    type = "within_group_clause"
    match_grammar = Sequence(
        "WITHIN",
        "GROUP",
        Bracketed(
            Ref("OrderByClauseSegment"),
        ),
        # Optional trailing OVER (PARTITION BY ...), e.g. for PERCENTILE_CONT.
        Sequence(
            "OVER",
            Bracketed(Ref("PartitionClauseSegment")),
            optional=True,
        ),
    )
class PartitionClauseSegment(ansi.PartitionClauseSegment):
    """PARTITION BY clause.

    https://docs.microsoft.com/en-us/sql/t-sql/queries/select-over-clause-transact-sql?view=sql-server-ver15#partition-by
    """
    type = "partitionby_clause"
    match_grammar = Sequence(
        "PARTITION",
        "BY",
        Delimited(
            OptionallyBracketed(
                OneOf(
                    Ref("ColumnReferenceSegment"),
                    Bracketed(
                        Ref("SelectStatementSegment"),
                    ),
                    Ref("FunctionSegment"),
                    Ref("VariableIdentifierSegment"),
                ),
            ),
        ),
    )
    # Explicitly clear the ANSI parse_grammar so only match_grammar applies.
    parse_grammar = None
class OnPartitionsSegment(BaseSegment):
    """ON PARTITIONS clause.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15
    """
    type = "on_partitions_clause"
    match_grammar = Sequence(
        "ON",
        "PARTITIONS",
        # e.g. ON PARTITIONS (2), ON PARTITIONS (1, 4 TO 6)
        Bracketed(
            Delimited(
                OneOf(
                    Ref("NumericLiteralSegment"),
                    Sequence(
                        Ref("NumericLiteralSegment"), "TO", Ref("NumericLiteralSegment")
                    ),
                )
            )
        ),
    )
class PartitionSchemeNameSegment(BaseSegment):
    """Partition Scheme Name."""
    # Distinct type so rules can tell a scheme name apart from other identifiers.
    type = "partition_scheme_name"
    match_grammar = Ref("SingleIdentifierGrammar")
class PartitionSchemeClause(BaseSegment):
    """Partition Scheme Clause segment.

    e.g. `ON my_scheme (my_column)` in CREATE INDEX.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15
    """
    type = "partition_scheme_clause"
    match_grammar = Sequence(
        "ON",
        Ref("PartitionSchemeNameSegment"),
        Bracketed(Ref("ColumnReferenceSegment")),
    )
class FunctionSegment(BaseSegment):
    """A scalar or aggregate function.

    Maybe in the future we should distinguish between
    aggregate functions and other functions. For now
    we treat them the same because they look the same
    for our purposes.
    """
    type = "function"
    match_grammar = OneOf(
        Sequence(
            # Treat functions which take date parts separately
            # So those functions parse date parts as DatetimeUnitSegment
            # rather than identifiers.
            Ref("DatePartFunctionNameSegment"),
            Bracketed(
                Delimited(
                    Ref("DatetimeUnitSegment"),
                    Ref(
                        "FunctionContentsGrammar",
                        # The brackets might be empty for some functions...
                        optional=True,
                        ephemeral_name="FunctionContentsGrammar",
                    ),
                )
            ),
        ),
        # Ranking functions: RANK() OVER (...) etc. — OVER clause is mandatory.
        Sequence(
            Ref("RankFunctionNameSegment"),
            Bracketed(
                Ref("NumericLiteralSegment", optional=True),
            ),
            "OVER",
            Bracketed(
                Ref("PartitionClauseSegment", optional=True),
                Ref("OrderByClauseSegment"),
            ),
        ),
        Sequence(
            # https://docs.microsoft.com/en-us/sql/t-sql/functions/cast-and-convert-transact-sql?view=sql-server-ver15
            # CONVERT(datatype[(len)], expression [, style])
            Ref("ConvertFunctionNameSegment"),
            Bracketed(
                Ref("DatatypeSegment"),
                Bracketed(Ref("NumericLiteralSegment"), optional=True),
                Ref("CommaSegment"),
                Ref("ExpressionSegment"),
                Sequence(
                    Ref("CommaSegment"), Ref("NumericLiteralSegment"), optional=True
                ),
            ),
        ),
        Sequence(
            # https://docs.microsoft.com/en-us/sql/t-sql/functions/cast-and-convert-transact-sql?view=sql-server-ver15
            # CAST(expression AS datatype)
            Ref("CastFunctionNameSegment"),
            Bracketed(
                Ref("ExpressionSegment"),
                "AS",
                Ref("DatatypeSegment"),
            ),
        ),
        # Aggregates that take a WITHIN GROUP (ORDER BY ...) clause.
        Sequence(
            Ref("WithinGroupFunctionNameSegment"),
            Bracketed(
                Delimited(
                    Ref(
                        "FunctionContentsGrammar",
                        # The brackets might be empty for some functions...
                        optional=True,
                        ephemeral_name="FunctionContentsGrammar",
                    ),
                ),
            ),
            Ref("WithinGroupClause", optional=True),
        ),
        # Generic fallback: any other function name, excluding the special
        # cases matched above so they never fall through to this branch.
        Sequence(
            Ref(
                "FunctionNameSegment",
                exclude=OneOf(
                    Ref("ValuesClauseSegment"),
                    # List of special functions handled differently
                    Ref("CastFunctionNameSegment"),
                    Ref("ConvertFunctionNameSegment"),
                    Ref("DatePartFunctionNameSegment"),
                    Ref("WithinGroupFunctionNameSegment"),
                    Ref("RankFunctionNameSegment"),
                ),
            ),
            Bracketed(
                Ref(
                    "FunctionContentsGrammar",
                    # The brackets might be empty for some functions...
                    optional=True,
                    ephemeral_name="FunctionContentsGrammar",
                )
            ),
            Ref("PostFunctionGrammar", optional=True),
        ),
    )
class CreateTableStatementSegment(BaseSegment):
    """A `CREATE TABLE` statement."""
    type = "create_table_statement"
    # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
    # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-azure-sql-data-warehouse?view=aps-pdw-2016-au7
    match_grammar = Sequence(
        "CREATE",
        "TABLE",
        Ref("TableReferenceSegment"),
        OneOf(
            # Columns and comment syntax:
            Sequence(
                Bracketed(
                    Delimited(
                        OneOf(
                            Ref("TableConstraintSegment"),
                            Ref("ColumnDefinitionSegment"),
                            Ref("TableIndexSegment"),
                        ),
                        # A trailing comma after the last column is legal.
                        allow_trailing=True,
                    )
                ),
            ),
            # Create AS syntax:
            Sequence(
                "AS",
                OptionallyBracketed(Ref("SelectableGrammar")),
            ),
            # Create like syntax
            Sequence("LIKE", Ref("TableReferenceSegment")),
        ),
        Ref(
            "TableDistributionIndexClause", optional=True
        ),  # Azure Synapse Analytics specific
        Ref("OnPartitionOrFilegroupOptionSegment", optional=True),
        Ref("FilestreamOnOptionSegment", optional=True),
        Ref("TextimageOnOptionSegment", optional=True),
        # need to add table options here
        Ref("DelimiterGrammar", optional=True),
    )
    parse_grammar = match_grammar
class AlterTableStatementSegment(BaseSegment):
    """An `ALTER TABLE` statement.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/alter-table-transact-sql?view=sql-server-ver15

    Overriding ANSI to remove TSQL non-keywords MODIFY, FIRST
    TODO: Flesh out TSQL-specific functionality
    """
    type = "alter_table_statement"
    match_grammar = Sequence(
        "ALTER",
        "TABLE",
        Ref("TableReferenceSegment"),
        Delimited(
            OneOf(
                # Table options
                Sequence(
                    Ref("ParameterNameSegment"),
                    Ref("EqualsSegment", optional=True),
                    OneOf(Ref("LiteralGrammar"), Ref("NakedIdentifierSegment")),
                ),
                # ADD/ALTER [COLUMN] <column definition>
                Sequence(
                    OneOf(
                        "ADD",
                        "ALTER",
                    ),
                    Ref.keyword("COLUMN", optional=True),
                    Ref("ColumnDefinitionSegment"),
                ),
                # ADD <constraint> FOR <column> (e.g. ADD DEFAULT ... FOR col)
                Sequence(
                    "ADD",
                    Ref("ColumnConstraintSegment"),
                    "FOR",
                    Ref("ColumnReferenceSegment"),
                ),
                # [WITH CHECK] ADD <table constraint>
                Sequence(
                    Sequence(
                        "WITH",
                        "CHECK",
                        optional=True,
                    ),
                    "ADD",
                    Ref("TableConstraintSegment"),
                ),
                # CHECK/DROP CONSTRAINT <name>
                Sequence(
                    OneOf(
                        "CHECK",
                        "DROP",
                    ),
                    "CONSTRAINT",
                    Ref("ObjectReferenceSegment"),
                ),
                # Rename
                Sequence(
                    "RENAME",
                    OneOf("AS", "TO", optional=True),
                    Ref("TableReferenceSegment"),
                ),
            ),
        ),
    )
class TableConstraintSegment(BaseSegment):
    """A table constraint, e.g. for CREATE TABLE."""
    # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
    type = "table_constraint"
    match_grammar = Sequence(
        Sequence(  # [ CONSTRAINT <Constraint name> ]
            "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True
        ),
        OneOf(
            # PRIMARY KEY / UNIQUE with its column list and index options.
            Sequence(
                Ref("PrimaryKeyGrammar"),
                Ref("BracketedIndexColumnListGrammar"),
                Ref("RelationalIndexOptionsSegment", optional=True),
                Ref("OnPartitionOrFilegroupOptionSegment", optional=True),
            ),
            Sequence(  # FOREIGN KEY ( column_name [, ... ] )
                # REFERENCES reftable [ ( refcolumn [, ... ] ) ]
                Ref("ForeignKeyGrammar"),
                # Local columns making up FOREIGN KEY constraint
                Ref("BracketedColumnReferenceListGrammar"),
                # REFERENCES reftable [ ( refcolumn) ] + ON DELETE/ON UPDATE
                Ref("ReferencesConstraintGrammar"),
            ),
            Ref("CheckConstraintGrammar", optional=True),
        ),
    )
class TableIndexSegment(BaseSegment):
    """A table index, e.g. for CREATE TABLE."""
    # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15
    type = "table_index_segment"
    match_grammar = Sequence(
        Sequence("INDEX", Ref("ObjectReferenceSegment"), optional=True),
        OneOf(
            # Rowstore index: [UNIQUE] [CLUSTERED|NONCLUSTERED] (cols...)
            Sequence(
                Sequence("UNIQUE", optional=True),
                OneOf("CLUSTERED", "NONCLUSTERED", optional=True),
                Ref("BracketedIndexColumnListGrammar"),
            ),
            # Clustered columnstore index has no column list.
            Sequence("CLUSTERED", "COLUMNSTORE"),
            # Nonclustered columnstore index over specific columns.
            Sequence(
                Sequence("NONCLUSTERED", optional=True),
                "COLUMNSTORE",
                Ref("BracketedColumnReferenceListGrammar"),
            ),
        ),
        Ref("RelationalIndexOptionsSegment", optional=True),
        Ref("OnPartitionOrFilegroupOptionSegment", optional=True),
        Ref("FilestreamOnOptionSegment", optional=True),
    )
class BracketedIndexColumnListGrammar(BaseSegment):
    """list of columns used for CREATE INDEX, constraints.

    e.g. `(col_a ASC, col_b DESC)`.
    """
    type = "bracketed_index_column_list_grammar"
    match_grammar = Sequence(
        Bracketed(
            Delimited(
                Ref("IndexColumnDefinitionSegment"),
            )
        )
    )
class FilegroupNameSegment(BaseSegment):
    """Filegroup Name Segment."""
    # Distinct type so rules can tell a filegroup name apart from other identifiers.
    type = "filegroup_name"
    match_grammar = Ref("SingleIdentifierGrammar")
class FilegroupClause(BaseSegment):
    """Filegroup Clause segment.

    e.g. `ON [PRIMARY]`.

    https://docs.microsoft.com/en-us/sql/relational-databases/databases/database-files-and-filegroups?view=sql-server-ver15
    """
    type = "filegroup_clause"
    match_grammar = Sequence(
        "ON",
        Ref("FilegroupNameSegment"),
    )
class IdentityGrammar(BaseSegment):
    """`IDENTITY (1,1)` in table schemas.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql-identity-property?view=sql-server-ver15
    """
    type = "identity_grammar"
    match_grammar = Sequence(
        "IDENTITY",
        # optional (seed, increment) e.g. (1, 1)
        Bracketed(
            Sequence(
                Ref("NumericLiteralSegment"),
                Ref("CommaSegment"),
                Ref("NumericLiteralSegment"),
            ),
            optional=True,
        ),
    )
class EncryptedWithGrammar(BaseSegment):
    """ENCRYPTED WITH in table schemas.

    Always Encrypted column option, e.g.
    `ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = key, ENCRYPTION_TYPE = DETERMINISTIC,
    ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256')`.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql-identity-property?view=sql-server-ver15
    """
    type = "encrypted_with_grammar"
    match_grammar = Sequence(
        "ENCRYPTED",
        "WITH",
        Bracketed(
            Delimited(
                Sequence(
                    "COLUMN_ENCRYPTION_KEY",
                    Ref("EqualsSegment"),
                    Ref("SingleIdentifierGrammar"),
                ),
                Sequence(
                    "ENCRYPTION_TYPE",
                    Ref("EqualsSegment"),
                    OneOf("DETERMINISTIC", "RANDOMIZED"),
                ),
                Sequence(
                    "ALGORITHM",
                    Ref("EqualsSegment"),
                    Ref("QuotedLiteralSegment"),
                ),
            )
        ),
    )
class TableDistributionIndexClause(BaseSegment):
    """`CREATE TABLE` distribution / index clause.

    This is specific to Azure Synapse Analytics.
    """
    type = "table_distribution_index_clause"
    match_grammar = Sequence(
        "WITH",
        # e.g. WITH (DISTRIBUTION = HASH(col), CLUSTERED COLUMNSTORE INDEX)
        Bracketed(
            Delimited(
                Ref("TableDistributionClause"),
                Ref("TableIndexClause"),
                Ref("TableLocationClause"),
            ),
        ),
    )
class TableDistributionClause(BaseSegment):
    """`CREATE TABLE` distribution clause.

    This is specific to Azure Synapse Analytics.
    """
    type = "table_distribution_clause"
    match_grammar = Sequence(
        "DISTRIBUTION",
        Ref("EqualsSegment"),
        OneOf(
            "REPLICATE",
            "ROUND_ROBIN",
            # HASH(<distribution column>)
            Sequence(
                "HASH",
                Bracketed(Ref("ColumnReferenceSegment")),
            ),
        ),
    )
class TableIndexClause(BaseSegment):
    """`CREATE TABLE` table index clause.

    This is specific to Azure Synapse Analytics.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-azure-sql-data-warehouse?view=aps-pdw-2016-au7#TableOptions
    """
    type = "table_index_clause"
    match_grammar = Sequence(
        OneOf(
            "HEAP",
            # CLUSTERED COLUMNSTORE INDEX [ORDER (cols...)]
            Sequence(
                "CLUSTERED",
                "COLUMNSTORE",
                "INDEX",
                Sequence(
                    "ORDER",
                    Bracketed(
                        Delimited(
                            Ref("ColumnReferenceSegment"),
                        ),
                    ),
                    optional=True,
                ),
            ),
            # CLUSTERED INDEX (col [ASC|DESC], ...)
            Sequence(
                "CLUSTERED",
                "INDEX",
                Bracketed(
                    Delimited(
                        Ref("ColumnReferenceSegment"),
                        OneOf(
                            "ASC",
                            "DESC",
                            optional=True,
                        ),
                    ),
                ),
            ),
        ),
    )
class TableLocationClause(BaseSegment):
    """`CREATE TABLE` location clause.

    This is specific to Azure Synapse Analytics (deprecated) or to an external table.
    """
    type = "table_location_clause"
    match_grammar = Sequence(
        "LOCATION",
        Ref("EqualsSegment"),
        OneOf(
            "USER_DB",  # Azure Synapse Analytics specific
            Ref("QuotedLiteralSegment"),  # External Table
        ),
    )
class AlterTableSwitchStatementSegment(BaseSegment):
    """An `ALTER TABLE SWITCH` statement."""
    type = "alter_table_switch_statement"
    # https://docs.microsoft.com/en-us/sql/t-sql/statements/alter-table-transact-sql?view=sql-server-ver15
    # T-SQL's ALTER TABLE SWITCH grammar is different enough to core ALTER TABLE grammar
    # to merit its own definition
    match_grammar = Sequence(
        "ALTER",
        "TABLE",
        Ref("ObjectReferenceSegment"),
        "SWITCH",
        # Optional source partition number.
        Sequence("PARTITION", Ref("NumericLiteralSegment"), optional=True),
        "TO",
        Ref("ObjectReferenceSegment"),
        Sequence(  # Azure Synapse Analytics specific
            "WITH",
            Bracketed("TRUNCATE_TARGET", Ref("EqualsSegment"), OneOf("ON", "OFF")),
            optional=True,
        ),
        Ref("DelimiterGrammar", optional=True),
    )
class CreateTableAsSelectStatementSegment(BaseSegment):
    """A `CREATE TABLE AS SELECT` statement.

    This is specific to Azure Synapse Analytics.
    """
    type = "create_table_as_select_statement"
    # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-as-select-azure-sql-data-warehouse?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true
    match_grammar = Sequence(
        "CREATE",
        "TABLE",
        Ref("TableReferenceSegment"),
        # Distribution clause is mandatory for CTAS in Synapse.
        Ref("TableDistributionIndexClause"),
        "AS",
        OptionallyBracketed(Ref("SelectableGrammar")),
        Ref("OptionClauseSegment", optional=True),
        Ref("DelimiterGrammar", optional=True),
    )
class TransactionStatementSegment(BaseSegment):
    """A `COMMIT`, `ROLLBACK` or `TRANSACTION` statement."""
    type = "transaction_statement"
    match_grammar = OneOf(
        # [ BEGIN | SAVE ] [ TRANSACTION | TRAN ] [ <Name> | <Variable> ]
        # COMMIT [ TRANSACTION | TRAN | WORK ]
        # ROLLBACK [ TRANSACTION | TRAN | WORK ] [ <Name> | <Variable> ]
        # https://docs.microsoft.com/en-us/sql/t-sql/language-elements/begin-transaction-transact-sql?view=sql-server-ver15
        Sequence(
            "BEGIN",
            Sequence("DISTRIBUTED", optional=True),
            Ref("TransactionGrammar"),
            Ref("SingleIdentifierGrammar", optional=True),
            # WITH MARK 'description' — marks the transaction in the log.
            Sequence("WITH", "MARK", Ref("QuotedIdentifierSegment"), optional=True),
            Ref("DelimiterGrammar", optional=True),
        ),
        # COMMIT/ROLLBACK [TRAN|TRANSACTION] [<name>|@variable]
        Sequence(
            OneOf("COMMIT", "ROLLBACK"),
            Ref("TransactionGrammar", optional=True),
            OneOf(
                Ref("SingleIdentifierGrammar"),
                Ref("VariableIdentifierSegment"),
                optional=True,
            ),
            Ref("DelimiterGrammar", optional=True),
        ),
        # COMMIT/ROLLBACK [WORK] — ISO-compatible form, takes no name.
        Sequence(
            OneOf("COMMIT", "ROLLBACK"),
            Sequence("WORK", optional=True),
            Ref("DelimiterGrammar", optional=True),
        ),
        # SAVE TRAN|TRANSACTION <savepoint name>
        Sequence(
            "SAVE",
            Ref("TransactionGrammar"),
            OneOf(
                Ref("SingleIdentifierGrammar"),
                Ref("VariableIdentifierSegment"),
                optional=True,
            ),
            Ref("DelimiterGrammar", optional=True),
        ),
    )
class BeginEndSegment(BaseSegment):
    """A `BEGIN/END` block.

    Encloses multiple statements into a single statement object.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/begin-end-transact-sql?view=sql-server-ver15
    """
    type = "begin_end_block"
    match_grammar = Sequence(
        "BEGIN",
        Ref("DelimiterGrammar", optional=True),
        Indent,
        Ref("OneOrMoreStatementsGrammar"),
        Dedent,
        "END",
    )
class TryCatchSegment(BaseSegment):
    """A `TRY/CATCH` block pair.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/try-catch-transact-sql?view=sql-server-ver15
    """
    type = "try_catch"
    match_grammar = Sequence(
        # BEGIN TRY ... END TRY
        "BEGIN",
        "TRY",
        Ref("DelimiterGrammar", optional=True),
        Indent,
        Ref("OneOrMoreStatementsGrammar"),
        Dedent,
        "END",
        "TRY",
        # BEGIN CATCH ... END CATCH must follow immediately.
        "BEGIN",
        "CATCH",
        Ref("DelimiterGrammar", optional=True),
        Indent,
        Ref("OneOrMoreStatementsGrammar"),
        Dedent,
        "END",
        "CATCH",
    )
class BatchSegment(BaseSegment):
    """A segment representing a GO batch within a file or script."""
    type = "batch"
    match_grammar = OneOf(
        # Things that can be bundled
        Ref("OneOrMoreStatementsGrammar"),
        # Things that can't be bundled
        Ref("CreateProcedureStatementSegment"),
    )
class FileSegment(BaseFileSegment):
    """A segment representing a whole file or script.

    We override default as T-SQL allows concept of several
    batches of commands separated by GO as well as usual
    semicolon-separated statement lines.

    This is also the default "root" segment of the dialect,
    and so is usually instantiated directly. It therefore
    has no match_grammar.
    """
    # NB: We don't need a match_grammar here because we're
    # going straight into instantiating it directly usually.
    parse_grammar = Sequence(
        # Leading GO separators before any content.
        AnyNumberOf(Ref("BatchDelimiterGrammar")),
        Delimited(
            Ref("BatchSegment"),
            # Batches are separated by one or more GO (batch) delimiters,
            # each optionally preceded by a statement delimiter.
            delimiter=AnyNumberOf(
                Sequence(
                    Ref("DelimiterGrammar", optional=True), Ref("BatchDelimiterGrammar")
                ),
                min_times=1,
            ),
            allow_gaps=True,
            allow_trailing=True,
        ),
    )
class DeleteStatementSegment(BaseSegment):
    """A `DELETE` statement.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/delete-transact-sql?view=sql-server-ver15

    Overriding ANSI to remove StartsWith logic which assumes statements have been
    delimited and to allow for Azure Synapse Analytics-specific DELETE statements
    """
    type = "delete_statement"
    # match grammar. This one makes sense in the context of knowing that it's
    # definitely a statement, we just don't know what type yet.
    match_grammar = Sequence(
        "DELETE",
        Ref("TableReferenceSegment", optional=True),  # Azure Synapse Analytics-specific
        Ref("FromClauseSegment"),
        Ref("PostTableExpressionGrammar", optional=True),
        Ref("OutputClauseSegment", optional=True),
        Ref("WhereClauseSegment", optional=True),
        Ref("DelimiterGrammar", optional=True),
    )
class FromClauseSegment(BaseSegment):
    """A `FROM` clause like in `SELECT`.

    NOTE: this is a delimited set of table expressions, with a variable
    number of optional join clauses with those table expressions. The
    delmited aspect is the higher of the two such that the following is
    valid (albeit unusual):

    ```
    SELECT *
    FROM a JOIN b, c JOIN d
    ```

    Overriding ANSI to remove Delimited logic which assumes statements have been
    delimited
    """
    type = "from_clause"
    match_grammar = Sequence(
        "FROM",
        Delimited(Ref("FromExpressionSegment")),
        Ref("DelimiterGrammar", optional=True),
    )
    # Reuse the ANSI alias-resolution logic unchanged.
    get_eventual_aliases = ansi.FromClauseSegment.get_eventual_aliases
class TableExpressionSegment(BaseSegment):
    """The main table expression e.g. within a FROM clause.

    In SQL standard, as well as T-SQL, table expressions (`table reference` in SQL
    standard) can also be join tables, optionally bracketed, allowing for nested joins.
    """
    type = "table_expression"
    match_grammar: Matchable = OneOf(
        Ref("ValuesClauseSegment"),
        Ref("BareFunctionSegment"),
        Ref("FunctionSegment"),
        Ref("TableReferenceSegment"),
        # Nested Selects
        Bracketed(Ref("SelectableGrammar")),
        Bracketed(Ref("MergeStatementSegment")),
        # Bracketed nested join, e.g. FROM (a JOIN b ON ...) JOIN c ...
        Bracketed(
            Sequence(
                Ref("TableExpressionSegment"),
                Conditional(Dedent, indented_joins=False),
                OneOf(Ref("JoinClauseSegment"), Ref("JoinLikeClauseGrammar")),
                Conditional(Dedent, indented_joins=True),
            )
        ),
    )
class GroupByClauseSegment(BaseSegment):
    """A `GROUP BY` clause like in `SELECT`.

    Overriding ANSI to remove Delimited logic which assumes statements have been
    delimited
    """
    type = "groupby_clause"
    match_grammar = Sequence(
        "GROUP",
        "BY",
        Indent,
        # First grouping expression, then zero or more `, <expression>` pairs —
        # a hand-rolled comma list instead of Delimited (see docstring).
        OneOf(
            Ref("ColumnReferenceSegment"),
            # Can `GROUP BY 1`
            Ref("NumericLiteralSegment"),
            # Can `GROUP BY coalesce(col, 1)`
            Ref("ExpressionSegment"),
        ),
        AnyNumberOf(
            Ref("CommaSegment"),
            OneOf(
                Ref("ColumnReferenceSegment"),
                # Can `GROUP BY 1`
                Ref("NumericLiteralSegment"),
                # Can `GROUP BY coalesce(col, 1)`
                Ref("ExpressionSegment"),
            ),
        ),
        Dedent,
    )
class HavingClauseSegment(BaseSegment):
    """A `HAVING` clause like in `SELECT`.

    Overriding ANSI to remove StartsWith with greedy terminator
    """

    type = "having_clause"
    match_grammar = Sequence(
        "HAVING",
        Indent,
        OptionallyBracketed(Ref("ExpressionSegment")),
        Dedent,
    )
class OrderByClauseSegment(BaseSegment):
    """A `ORDER BY` clause like in `SELECT`.

    Overriding ANSI to remove StartsWith logic which assumes statements have been
    delimited
    """

    type = "orderby_clause"
    match_grammar = Sequence(
        "ORDER",
        "BY",
        Indent,
        Delimited(
            Sequence(
                OneOf(
                    Ref("ColumnReferenceSegment"),
                    # Can `ORDER BY 1`
                    Ref("NumericLiteralSegment"),
                    # Can order by an expression
                    Ref("ExpressionSegment"),
                ),
                # Sort direction defaults to ASC when omitted.
                OneOf("ASC", "DESC", optional=True),
            ),
        ),
        Dedent,
    )
class RenameStatementSegment(BaseSegment):
    """`RENAME` statement.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/rename-transact-sql?view=aps-pdw-2016-au7

    Azure Synapse Analytics-specific.
    """

    type = "rename_statement"
    match_grammar = Sequence(
        "RENAME",
        "OBJECT",
        Ref("ObjectReferenceSegment"),
        "TO",
        Ref("SingleIdentifierGrammar"),
        Ref("DelimiterGrammar", optional=True),
    )
class DropTableStatementSegment(ansi.DropTableStatementSegment):
    """A `DROP TABLE` statement.

    Overriding ANSI to add optional delimiter.
    """

    match_grammar = ansi.DropTableStatementSegment.match_grammar.copy(
        insert=[
            Ref("DelimiterGrammar", optional=True),
        ],
    )
class DropViewStatementSegment(ansi.DropViewStatementSegment):
    """A `DROP VIEW` statement.

    Overriding ANSI to add optional delimiter.
    """

    match_grammar = ansi.DropViewStatementSegment.match_grammar.copy(
        insert=[
            Ref("DelimiterGrammar", optional=True),
        ],
    )
class DropUserStatementSegment(ansi.DropUserStatementSegment):
    """A `DROP USER` statement.

    Overriding ANSI to add optional delimiter.
    """

    match_grammar = ansi.DropUserStatementSegment.match_grammar.copy(
        insert=[
            Ref("DelimiterGrammar", optional=True),
        ],
    )
class UpdateStatementSegment(BaseSegment):
    """An `Update` statement.

    UPDATE <table name> SET <set clause list> [ WHERE <search condition> ]

    Overriding ANSI in order to allow for PostTableExpressionGrammar (table hints)
    """

    type = "update_statement"
    match_grammar = Sequence(
        "UPDATE",
        Indent,
        OneOf(Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar")),
        # T-SQL table hints, e.g. `WITH (NOLOCK)`.
        Ref("PostTableExpressionGrammar", optional=True),
        Ref("SetClauseListSegment"),
        Dedent,
        Ref("OutputClauseSegment", optional=True),
        Ref("FromClauseSegment", optional=True),
        Ref("WhereClauseSegment", optional=True),
        Ref("OptionClauseSegment", optional=True),
        Ref("DelimiterGrammar", optional=True),
    )
class SetClauseListSegment(BaseSegment):
    """set clause list.

    Overriding ANSI to remove Delimited
    """

    type = "set_clause_list"
    match_grammar = Sequence(
        "SET",
        Indent,
        Ref("SetClauseSegment"),
        # Explicit comma handling instead of the Delimited wrapper.
        AnyNumberOf(
            Ref("CommaSegment"),
            Ref("SetClauseSegment"),
        ),
        Dedent,
    )
class SetClauseSegment(BaseSegment):
    """Set clause.

    Overriding ANSI to allow for ExpressionSegment on the right
    """

    type = "set_clause"
    match_grammar = Sequence(
        Ref("ColumnReferenceSegment"),
        Ref("AssignmentOperatorSegment"),
        Ref("ExpressionSegment"),
    )
class PrintStatementSegment(BaseSegment):
    """PRINT statement segment.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/print-transact-sql
    """

    type = "print_statement"
    match_grammar = Sequence(
        "PRINT",
        Ref("ExpressionSegment"),
        Ref("DelimiterGrammar", optional=True),
    )
class OptionClauseSegment(BaseSegment):
    """Query Hint clause.

    https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-query?view=sql-server-ver15
    """

    type = "option_clause"
    match_grammar = Sequence(
        # NOTE(review): the OPTION keyword itself is optional here, so a bare
        # bracketed hint list also matches — confirm this is intended.
        Sequence("OPTION", optional=True),
        Bracketed(
            Delimited(Ref("QueryHintSegment")),
        ),
    )
class QueryHintSegment(BaseSegment):
    """Query Hint segment.

    One alternative per documented hint form.
    https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-query?view=sql-server-ver15
    """

    type = "query_hint_segment"
    match_grammar = OneOf(
        Sequence(  # Azure Synapse Analytics specific
            "LABEL",
            Ref("EqualsSegment"),
            Ref("QuotedLiteralSegment"),
        ),
        Sequence(
            OneOf("HASH", "ORDER"),
            "GROUP",
        ),
        Sequence(OneOf("MERGE", "HASH", "CONCAT"), "UNION"),
        Sequence(OneOf("LOOP", "MERGE", "HASH"), "JOIN"),
        Sequence("EXPAND", "VIEWS"),
        # Hints that take a numeric argument (with or without `=`).
        Sequence(
            OneOf(
                "FAST",
                "MAXDOP",
                "MAXRECURSION",
                "QUERYTRACEON",
                Sequence(
                    OneOf(
                        "MAX_GRANT_PERCENT",
                        "MIN_GRANT_PERCENT",
                    ),
                    Ref("EqualsSegment"),
                ),
            ),
            Ref("NumericLiteralSegment"),
        ),
        Sequence("FORCE", "ORDER"),
        Sequence(
            OneOf("FORCE", "DISABLE"),
            OneOf("EXTERNALPUSHDOWN", "SCALEOUTEXECUTION"),
        ),
        Sequence(
            OneOf(
                "KEEP",
                "KEEPFIXED",
                "ROBUST",
            ),
            "PLAN",
        ),
        "IGNORE_NONCLUSTERED_COLUMNSTORE_INDEX",
        "NO_PERFORMANCE_SPOOL",
        # OPTIMIZE FOR (param [= literal | UNKNOWN], ...) | UNKNOWN
        Sequence(
            "OPTIMIZE",
            "FOR",
            OneOf(
                "UNKNOWN",
                Bracketed(
                    Ref("ParameterNameSegment"),
                    OneOf(
                        "UNKNOWN", Sequence(Ref("EqualsSegment"), Ref("LiteralGrammar"))
                    ),
                    AnyNumberOf(
                        Ref("CommaSegment"),
                        Ref("ParameterNameSegment"),
                        OneOf(
                            "UNKNOWN",
                            Sequence(Ref("EqualsSegment"), Ref("LiteralGrammar")),
                        ),
                    ),
                ),
            ),
        ),
        Sequence("PARAMETERIZATION", OneOf("SIMPLE", "FORCED")),
        "RECOMPILE",
        Sequence(
            "USE",
            "HINT",
            Bracketed(
                Ref("QuotedLiteralSegment"),
                AnyNumberOf(Ref("CommaSegment"), Ref("QuotedLiteralSegment")),
            ),
        ),
        Sequence(
            "USE",
            "PLAN",
            OneOf(Ref("QuotedLiteralSegment"), Ref("QuotedLiteralSegmentWithN")),
        ),
        Sequence(
            "TABLE",
            "HINT",
            Ref("ObjectReferenceSegment"),
            Delimited(Ref("TableHintSegment")),
        ),
    )
class PostTableExpressionGrammar(BaseSegment):
    """Table Hint clause. Overloading the PostTableExpressionGrammar to implement.

    https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table?view=sql-server-ver15
    """

    type = "post_table_expression"
    match_grammar = Sequence(
        # `WITH` is optional: `tbl (NOLOCK)` is legacy but still valid T-SQL.
        Sequence("WITH", optional=True),
        Bracketed(
            Ref("TableHintSegment"),
            AnyNumberOf(
                Ref("CommaSegment"),
                Ref("TableHintSegment"),
            ),
        ),
    )
class TableHintSegment(BaseSegment):
    """Table Hint segment.

    https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table?view=sql-server-ver15
    """

    # NOTE(review): this appears to be a copy-paste from QueryHintSegment —
    # "table_hint_segment" would be the expected type. Changing it would alter
    # parse-tree node types, so confirm downstream usage before fixing.
    type = "query_hint_segment"
    match_grammar = OneOf(
        "NOEXPAND",
        Sequence(
            "INDEX",
            Bracketed(
                Delimited(
                    OneOf(Ref("IndexReferenceSegment"), Ref("NumericLiteralSegment")),
                ),
            ),
        ),
        Sequence(
            "INDEX",
            Ref("EqualsSegment"),
            Bracketed(
                OneOf(Ref("IndexReferenceSegment"), Ref("NumericLiteralSegment")),
            ),
        ),
        "KEEPIDENTITY",
        "KEEPDEFAULTS",
        Sequence(
            "FORCESEEK",
            Bracketed(
                Ref("IndexReferenceSegment"),
                Bracketed(
                    Ref("SingleIdentifierGrammar"),
                    AnyNumberOf(Ref("CommaSegment"), Ref("SingleIdentifierGrammar")),
                ),
                optional=True,
            ),
        ),
        "FORCESCAN",
        "HOLDLOCK",
        "IGNORE_CONSTRAINTS",
        "IGNORE_TRIGGERS",
        "NOLOCK",
        "NOWAIT",
        "PAGLOCK",
        "READCOMMITTED",
        "READCOMMITTEDLOCK",
        "READPAST",
        "READUNCOMMITTED",
        "REPEATABLEREAD",
        "ROWLOCK",
        "SERIALIZABLE",
        "SNAPSHOT",
        Sequence(
            "SPATIAL_WINDOW_MAX_CELLS",
            Ref("EqualsSegment"),
            Ref("NumericLiteralSegment"),
        ),
        "TABLOCK",
        "TABLOCKX",
        "UPDLOCK",
        "XLOCK",
    )
class SetOperatorSegment(BaseSegment):
    """A set operator such as Union, Except or Intersect.

    Override ANSI to remove TSQL non-keyword MINUS.
    """

    type = "set_operator"
    match_grammar = OneOf(
        Sequence("UNION", OneOf("DISTINCT", "ALL", optional=True)),
        "INTERSECT",
        "EXCEPT",
    )
class SetExpressionSegment(BaseSegment):
    """A set expression with either Union, Minus, Except or Intersect.

    Overriding ANSI to include OPTION clause.
    """

    type = "set_expression"
    # match grammar
    match_grammar = Sequence(
        Ref("NonSetSelectableGrammar"),
        # At least one set-operator branch must follow the first selectable.
        AnyNumberOf(
            Sequence(
                Ref("SetOperatorSegment"),
                Ref("NonSetSelectableGrammar"),
            ),
            min_times=1,
        ),
        Ref("OrderByClauseSegment", optional=True),
        Ref("OptionClauseSegment", optional=True),
        Ref("DelimiterGrammar", optional=True),
    )
class ExecuteScriptSegment(BaseSegment):
    """`EXECUTE` statement.

    Matching segment name and type from exasol.
    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/execute-transact-sql?view=sql-server-ver15
    """

    type = "execute_script_statement"
    match_grammar = Sequence(
        OneOf("EXEC", "EXECUTE"),
        # Optional return-status capture, e.g. `EXEC @ret = proc`.
        Sequence(Ref("ParameterNameSegment"), Ref("EqualsSegment"), optional=True),
        OptionallyBracketed(Ref("ObjectReferenceSegment")),
        Indent,
        # Optional argument list: positional or named (`@param = value`),
        # each optionally followed by OUTPUT.
        Sequence(
            Sequence(Ref("ParameterNameSegment"), Ref("EqualsSegment"), optional=True),
            OneOf(
                "DEFAULT",
                Ref("LiteralGrammar"),
                Ref("ParameterNameSegment"),
                Ref("SingleIdentifierGrammar"),
            ),
            Sequence("OUTPUT", optional=True),
            AnyNumberOf(
                Ref("CommaSegment"),
                Sequence(
                    Ref("ParameterNameSegment"), Ref("EqualsSegment"), optional=True
                ),
                OneOf(
                    "DEFAULT",
                    Ref("LiteralGrammar"),
                    Ref("ParameterNameSegment"),
                    Ref("SingleIdentifierGrammar"),
                ),
                Sequence("OUTPUT", optional=True),
            ),
            optional=True,
        ),
        Dedent,
        Ref("DelimiterGrammar", optional=True),
    )
class CreateSchemaStatementSegment(BaseSegment):
    """A `CREATE SCHEMA` statement.

    Overriding ANSI to allow for AUTHORIZATION clause
    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-schema-transact-sql?view=sql-server-ver15

    Not yet implemented: proper schema_element parsing.
    Once we have an AccessStatementSegment that works for TSQL, this definition should
    be tweaked to include schema elements.
    """

    type = "create_schema_statement"
    match_grammar = Sequence(
        "CREATE",
        "SCHEMA",
        Ref("SchemaReferenceSegment"),
        Sequence(
            "AUTHORIZATION",
            Ref("SingleIdentifierGrammar"),
            optional=True,
        ),
        Ref(
            "DelimiterGrammar",
            optional=True,
        ),
    )
class MergeMatchSegment(BaseSegment):
    """Contains dialect specific merge operations.

    One or more WHEN [NOT] MATCHED clauses in any order, optionally followed
    by OUTPUT and OPTION clauses.
    """

    type = "merge_match"
    match_grammar = Sequence(
        AnyNumberOf(
            Ref("MergeMatchedClauseSegment"),
            Ref("MergeNotMatchedClauseSegment"),
            min_times=1,
        ),
        Ref("OutputClauseSegment", optional=True),
        Ref("OptionClauseSegment", optional=True),
    )
class MergeMatchedClauseSegment(BaseSegment):
    """The `WHEN MATCHED` clause within a `MERGE` statement."""

    type = "merge_when_matched_clause"
    match_grammar = Sequence(
        "WHEN",
        "MATCHED",
        # Optional extra search condition: `WHEN MATCHED AND <expr>`.
        Sequence(
            "AND",
            Ref("ExpressionSegment"),
            optional=True,
        ),
        Indent,
        "THEN",
        OneOf(
            Ref("MergeUpdateClauseSegment"),
            Ref("MergeDeleteClauseSegment"),
        ),
        Dedent,
    )
class MergeNotMatchedClauseSegment(BaseSegment):
    """The `WHEN NOT MATCHED` clause within a `MERGE` statement.

    Two forms: `NOT MATCHED [BY TARGET]` (insert only) and
    `NOT MATCHED BY SOURCE` (update or delete only).
    """

    type = "merge_when_not_matched_clause"
    match_grammar = OneOf(
        Sequence(
            "WHEN",
            "NOT",
            "MATCHED",
            Sequence("BY", "TARGET", optional=True),
            Sequence("AND", Ref("ExpressionSegment"), optional=True),
            Indent,
            "THEN",
            Ref("MergeInsertClauseSegment"),
            Dedent,
        ),
        Sequence(
            "WHEN",
            "NOT",
            "MATCHED",
            "BY",
            "SOURCE",
            Sequence("AND", Ref("ExpressionSegment"), optional=True),
            Indent,
            "THEN",
            OneOf(
                Ref("MergeUpdateClauseSegment"),
                Ref("MergeDeleteClauseSegment"),
            ),
            Dedent,
        ),
    )
class MergeInsertClauseSegment(BaseSegment):
    """`INSERT` clause within the `MERGE` statement."""

    type = "merge_insert_clause"
    match_grammar = Sequence(
        "INSERT",
        Indent,
        Ref("BracketedColumnReferenceListGrammar", optional=True),
        Dedent,
        "VALUES",
        Indent,
        OneOf(
            Bracketed(
                Delimited(
                    AnyNumberOf(
                        Ref("ExpressionSegment"),
                    ),
                ),
            ),
            # `INSERT DEFAULT VALUES` variant.
            Sequence(
                "DEFAULT",
                "VALUES",
            ),
        ),
        Dedent,
    )
class OutputClauseSegment(BaseSegment):
    """OUTPUT Clause used within DELETE, INSERT, UPDATE, MERGE.

    https://docs.microsoft.com/en-us/sql/t-sql/queries/output-clause-transact-sql?view=sql-server-ver15
    """

    type = "output_clause"
    match_grammar = AnyNumberOf(
        Sequence(
            "OUTPUT",
            Indent,
            Delimited(
                AnyNumberOf(
                    Ref("WildcardExpressionSegment"),
                    Sequence(
                        Ref("BaseExpressionElementGrammar"),
                        Ref("AliasExpressionSegment", optional=True),
                    ),
                    Ref("SingleIdentifierGrammar"),
                ),
            ),
            Dedent,
            # Optional `INTO table (cols)` target for the output rows.
            Sequence(
                "INTO",
                Indent,
                Ref("TableReferenceSegment"),
                Bracketed(
                    Delimited(
                        Ref("ColumnReferenceSegment"),
                    ),
                    optional=True,
                ),
                Dedent,
                optional=True,
            ),
        ),
    )
class ThrowStatementSegment(BaseSegment):
    """A THROW statement.

    Either a bare re-throw (`THROW`) or the three-argument form
    `THROW error_number, message, state`.
    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/throw-transact-sql?view=sql-server-ver15
    """

    type = "throw_statement"
    match_grammar = Sequence(
        "THROW",
        Sequence(
            OneOf(
                # error_number
                Ref("NumericLiteralSegment"),
                Ref("ParameterNameSegment"),
            ),
            Ref("CommaSegment"),
            OneOf(
                # message
                Ref("QuotedLiteralSegment"),
                Ref("QuotedLiteralSegmentWithN"),
                Ref("ParameterNameSegment"),
            ),
            Ref("CommaSegment"),
            OneOf(
                # state
                Ref("NumericLiteralSegment"),
                Ref("ParameterNameSegment"),
            ),
            optional=True,
        ),
    )
class RaiserrorStatementSegment(BaseSegment):
    """RAISERROR statement.

    RAISERROR (msg_id | msg_str | @var, severity, state [, args...]) [WITH ...]
    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/raiserror-transact-sql?view=sql-server-ver15
    """

    type = "raiserror_statement"
    match_grammar = Sequence(
        "RAISERROR",
        Bracketed(
            Delimited(
                # Message id, literal message text or variable.
                OneOf(
                    Ref("NumericLiteralSegment"),
                    Ref("QuotedLiteralSegment"),
                    Ref("QuotedLiteralSegmentWithN"),
                    Ref("ParameterNameSegment"),
                ),
                # Severity.
                OneOf(
                    Ref("NumericLiteralSegment"),
                    Ref("QualifiedNumericLiteralSegment"),
                    Ref("ParameterNameSegment"),
                ),
                # State.
                OneOf(
                    Ref("NumericLiteralSegment"),
                    Ref("QualifiedNumericLiteralSegment"),
                    Ref("ParameterNameSegment"),
                ),
                # Up to 20 substitution arguments (documented T-SQL limit).
                AnyNumberOf(
                    Ref("LiteralGrammar"),
                    Ref("ParameterNameSegment"),
                    min_times=0,
                    max_times=20,
                ),
            ),
        ),
        Sequence(
            "WITH",
            Delimited(
                "LOG",
                "NOWAIT",
                "SETERROR",
            ),
            optional=True,
        ),
    )
class WindowSpecificationSegment(BaseSegment):
    """Window specification within OVER(...).

    Overriding ANSI to remove window name option not supported by TSQL
    """

    type = "window_specification"
    match_grammar = Sequence(
        Ref("PartitionClauseSegment", optional=True),
        Ref("OrderByClauseSegment", optional=True),
        Ref("FrameClauseSegment", optional=True),
        optional=True,
        # Parsed lazily inside the brackets of the OVER clause.
        ephemeral_name="OverClauseContent",
    )
class GotoStatement(BaseSegment):
    """GOTO statement.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/goto-transact-sql?view=sql-server-ver15
    """

    type = "goto_statement"
    # `GOTO label` — the matching label is parsed by LabelStatementSegment.
    match_grammar = Sequence("GOTO", Ref("SingleIdentifierGrammar"))
class CreateTriggerStatementSegment(BaseSegment):
    """Create Trigger Statement.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-trigger-transact-sql?view=sql-server-ver15
    """

    type = "create_trigger"
    match_grammar: Matchable = Sequence(
        "CREATE",
        "TRIGGER",
        Ref("TriggerReferenceSegment"),
        "ON",
        OneOf(
            Ref("TableReferenceSegment"),
            Sequence("ALL", "SERVER"),
            "DATABASE",
        ),
        # Optional trigger options: DML triggers take ENCRYPTION and/or
        # EXECUTE AS; DDL/native triggers additionally take
        # NATIVE_COMPILATION / SCHEMABINDING.
        # NOTE: a third alternative identical to the first
        # (ENCRYPTION + EXECUTE AS) was removed here — a byte-for-byte
        # duplicate branch in a OneOf can never affect matching.
        Sequence(
            "WITH",
            OneOf(
                Sequence(
                    Ref.keyword("ENCRYPTION", optional=True),
                    Sequence(
                        "EXECUTE",
                        "AS",
                        Ref("SingleQuotedIdentifierSegment"),
                        optional=True,
                    ),
                ),
                Sequence(
                    Ref.keyword("NATIVE_COMPILATION", optional=True),
                    Ref.keyword("SCHEMABINDING", optional=True),
                    Sequence(
                        "EXECUTE",
                        "AS",
                        Ref("SingleQuotedIdentifierSegment"),
                        optional=True,
                    ),
                ),
            ),
            optional=True,
        ),
        # Timing: FOR <events>, AFTER, or INSTEAD OF.
        OneOf(
            Sequence("FOR", Delimited(Ref("SingleIdentifierGrammar"), optional=True)),
            "AFTER",
            Sequence("INSTEAD", "OF"),
            optional=True,
        ),
        # Triggering DML events.
        Delimited(
            "INSERT",
            "UPDATE",
            "DELETE",
            optional=True,
        ),
        Sequence("WITH", "APPEND", optional=True),
        Sequence("NOT", "FOR", "REPLICATION", optional=True),
        "AS",
        Ref("OneOrMoreStatementsGrammar"),
        # TODO: EXTERNAL NAME
    )
class DropTriggerStatementSegment(BaseSegment):
    """Drop Trigger Statement.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/drop-trigger-transact-sql?view=sql-server-ver15
    """

    type = "drop_trigger"
    match_grammar: Matchable = Sequence(
        "DROP",
        "TRIGGER",
        Ref("IfExistsGrammar", optional=True),
        Delimited(Ref("TriggerReferenceSegment")),
        # Scope for DDL triggers: ON DATABASE or ON ALL SERVER.
        Sequence("ON", OneOf("DATABASE", Sequence("ALL", "SERVER")), optional=True),
    )
class DisableTriggerStatementSegment(BaseSegment):
    """Disable Trigger Statement.

    https://docs.microsoft.com/en-us/sql/t-sql/statements/disable-trigger-transact-sql?view=sql-server-ver15
    """

    type = "disable_trigger"
    match_grammar: Matchable = Sequence(
        "DISABLE",
        "TRIGGER",
        OneOf(
            Delimited(Ref("TriggerReferenceSegment")),
            "ALL",
        ),
        Sequence(
            "ON",
            OneOf(Ref("ObjectReferenceSegment"), "DATABASE", Sequence("ALL", "SERVER")),
            optional=True,
        ),
    )
class LabelStatementSegment(BaseSegment):
    """Label Statement, for a GOTO statement.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/goto-transact-sql?view=sql-server-ver15
    """

    type = "label_segment"
    # allow_gaps=False: the colon must directly follow the label name.
    match_grammar: Matchable = Sequence(
        Ref("NakedIdentifierSegment"), Ref("ColonSegment"), allow_gaps=False
    )
class AccessStatementSegment(BaseSegment):
    """A `GRANT` or `REVOKE` statement.

    Also covers `DENY`.
    https://docs.microsoft.com/en-us/sql/t-sql/statements/grant-transact-sql?view=sql-server-ver15
    https://docs.microsoft.com/en-us/sql/t-sql/statements/deny-transact-sql?view=sql-server-ver15
    https://docs.microsoft.com/en-us/sql/t-sql/statements/revoke-transact-sql?view=sql-server-ver15
    """

    type = "access_statement"

    # Privileges that can be set on the account (structure carried over from
    # the snowflake dialect implementation).
    _global_permissions = OneOf(
        Sequence(
            "CREATE",
            OneOf(
                "ROLE",
                "USER",
                "WAREHOUSE",
                "DATABASE",
                "INTEGRATION",
            ),
        ),
        Sequence("APPLY", "MASKING", "POLICY"),
        "EXECUTE",
    )

    _schema_object_names = [
        "TABLE",
        "VIEW",
        "FUNCTION",
        "PROCEDURE",
        "SEQUENCE",
    ]

    _schema_object_types = OneOf(
        *_schema_object_names,
        Sequence("EXTERNAL", "TABLE"),
        Sequence("FILE", "FORMAT"),
    )

    # We reuse the object names above and simply append an `S` to the end of them to get
    # plurals
    _schema_object_types_plural = OneOf(
        *[f"{object_name}S" for object_name in _schema_object_names]
    )

    _permissions = Sequence(
        OneOf(
            "ALTER",
            "CONTROL",
            "DELETE",
            "EXECUTE",
            "INSERT",
            "RECEIVE",
            "REFERENCES",
            "SELECT",
            Sequence("TAKE", "OWNERSHIP"),
            "UPDATE",
            Sequence("VIEW", "CHANGE", "TRACKING"),
            Sequence("VIEW", "DEFINITION"),
        ),
        Ref("BracketedColumnReferenceListGrammar", optional=True),
    )

    # All of the object types that we can grant permissions on.
    # This list will contain ansi sql objects as well as dialect specific ones.
    _objects = Sequence(
        OneOf(
            "DATABASE",
            "LANGUAGE",
            "SCHEMA",
            "ROLE",
            "TYPE",
            Sequence(
                "FOREIGN",
                OneOf("SERVER", Sequence("DATA", "WRAPPER")),
            ),
            Sequence("ALL", "SCHEMAS", "IN", "DATABASE"),
            _schema_object_types,
            Sequence("ALL", _schema_object_types_plural, "IN", "SCHEMA"),
            optional=True,
        ),
        Delimited(Ref("ObjectReferenceSegment"), terminator=OneOf("TO", "FROM")),
        Ref("FunctionParameterListGrammar", optional=True),
    )

    match_grammar: Matchable = OneOf(
        # Based on https://www.postgresql.org/docs/13/sql-grant.html
        # and https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html
        Sequence(
            "GRANT",
            OneOf(
                Sequence(
                    Delimited(
                        OneOf(_global_permissions, _permissions),
                        delimiter=Ref("CommaSegment"),
                        terminator="ON",
                    ),
                ),
                Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)),
            ),
            "ON",
            # Optional securable-class prefix, e.g. `OBJECT::dbo.tbl`.
            Sequence(
                OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"),
                Ref("CastOperatorSegment"),
                optional=True,
            ),
            _objects,
            "TO",
            Delimited(
                OneOf(Ref("ObjectReferenceSegment"), Ref("FunctionSegment")),
                delimiter=Ref("CommaSegment"),
            ),
            OneOf(
                Sequence("WITH", "GRANT", "OPTION"),
                optional=True,
            ),
            Sequence(
                "AS",
                Ref("ObjectReferenceSegment"),
                optional=True,
            ),
        ),
        Sequence(
            "DENY",
            OneOf(
                Delimited(
                    OneOf(_global_permissions, _permissions),
                    delimiter=Ref("CommaSegment"),
                    terminator="ON",
                ),
                Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)),
            ),
            "ON",
            Sequence(
                OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"),
                Ref("CastOperatorSegment"),
                optional=True,
            ),
            _objects,
            OneOf("TO"),
            Delimited(
                Ref("ObjectReferenceSegment"),
                delimiter=Ref("CommaSegment"),
            ),
            Sequence(
                Ref.keyword("CASCADE", optional=True),
                Ref("ObjectReferenceSegment", optional=True),
                optional=True,
            ),
        ),
        Sequence(
            "REVOKE",
            Sequence("GRANT", "OPTION", "FOR", optional=True),
            OneOf(
                Delimited(
                    OneOf(_global_permissions, _permissions),
                    delimiter=Ref("CommaSegment"),
                    terminator="ON",
                ),
                Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)),
            ),
            "ON",
            Sequence(
                OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"),
                Ref("CastOperatorSegment"),
                optional=True,
            ),
            _objects,
            OneOf("TO", "FROM"),
            Delimited(
                Ref("ObjectReferenceSegment"),
                delimiter=Ref("CommaSegment"),
            ),
            Sequence(
                Ref.keyword("CASCADE", optional=True),
                Ref("ObjectReferenceSegment", optional=True),
                optional=True,
            ),
        ),
    )
class CreateTypeStatementSegment(BaseSegment):
    """A `CREATE TYPE` statement.

    Covers both alias types (`FROM base_type`) and table types (`AS TABLE`).
    https://docs.microsoft.com/en-us/sql/t-sql/statements/create-type-transact-sql?view=sql-server-ver15
    """

    type = "create_type_statement"
    match_grammar: Matchable = Sequence(
        "CREATE",
        "TYPE",
        Ref("ObjectReferenceSegment"),
        OneOf(
            Sequence("FROM", Ref("ObjectReferenceSegment")),
            Sequence(
                "AS",
                "TABLE",
                Sequence(
                    Bracketed(
                        Delimited(
                            OneOf(
                                Ref("TableConstraintSegment"),
                                Ref("ColumnDefinitionSegment"),
                                Ref("TableIndexSegment"),
                            ),
                            allow_trailing=True,
                        )
                    ),
                ),
            ),
        ),
    )
class OpenCursorStatementSegment(BaseSegment):
    """An `OPEN` cursor statement.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/open-transact-sql?view=sql-server-ver15
    """

    type = "open_cursor_statement"
    match_grammar: Matchable = Sequence(
        "OPEN",
        # Either a (optionally GLOBAL) cursor name or a cursor variable.
        OneOf(
            Sequence(
                Ref.keyword("GLOBAL", optional=True), Ref("NakedIdentifierSegment")
            ),
            Ref("ParameterNameSegment"),
        ),
    )
class CloseCursorStatementSegment(BaseSegment):
    """A `CLOSE` cursor statement.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/close-transact-sql?view=sql-server-ver15
    """

    type = "close_cursor_statement"
    match_grammar: Matchable = Sequence(
        "CLOSE",
        # Either a (optionally GLOBAL) cursor name or a cursor variable.
        OneOf(
            Sequence(
                Ref.keyword("GLOBAL", optional=True), Ref("NakedIdentifierSegment")
            ),
            Ref("ParameterNameSegment"),
        ),
    )
class DeallocateCursorStatementSegment(BaseSegment):
    """A `DEALLOCATE` cursor statement.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/deallocate-transact-sql?view=sql-server-ver15
    """

    type = "deallocate_cursor_statement"
    match_grammar: Matchable = Sequence(
        "DEALLOCATE",
        # Either a (optionally GLOBAL) cursor name or a cursor variable.
        OneOf(
            Sequence(
                Ref.keyword("GLOBAL", optional=True), Ref("NakedIdentifierSegment")
            ),
            Ref("ParameterNameSegment"),
        ),
    )
class FetchCursorStatementSegment(BaseSegment):
    """A `FETCH` cursor statement.

    https://docs.microsoft.com/en-us/sql/t-sql/language-elements/fetch-transact-sql?view=sql-server-ver15
    """

    type = "fetch_cursor_statement"
    match_grammar: Matchable = Sequence(
        "FETCH",
        OneOf("NEXT", "PRIOR", "FIRST", "LAST", optional=True),
        "FROM",
        OneOf(
            Sequence(
                Ref.keyword("GLOBAL", optional=True), Ref("NakedIdentifierSegment")
            ),
            Ref("ParameterNameSegment"),
        ),
        # Optional variable targets for the fetched columns.
        Sequence("INTO", Delimited(Ref("ParameterNameSegment")), optional=True),
    )
class ForXmlSegment(BaseSegment):
    """A segment for `FOR XML` in `SELECT` statements.

    https://docs.microsoft.com/en-us/sql/relational-databases/xml/for-xml-sql-server?view=sql-server-2017
    """

    type = "for_xml_segment"
    match_grammar: Matchable = Sequence(
        "FOR",
        "XML",
        # Mode: RAW/PATH accept an optional bracketed element name.
        OneOf(
            Sequence("RAW", Bracketed(Ref("QuotedLiteralSegment"), optional=True)),
            "AUTO",
            "EXPLICIT",
            Sequence("PATH", Bracketed(Ref("QuotedLiteralSegment"), optional=True)),
        ),
    )
| 30.160519 | 274 | 0.538575 |
45df4e9d0954a12b01eada2d9dc3e0901f80f073 | 3,942 | py | Python | supports/pyload/src/pyload/plugins/downloaders/RockfileEu.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | 1 | 2020-04-02T17:03:39.000Z | 2020-04-02T17:03:39.000Z | supports/pyload/src/pyload/plugins/downloaders/RockfileEu.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | null | null | null | supports/pyload/src/pyload/plugins/downloaders/RockfileEu.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import urllib.parse
from ...core.network.http.exceptions import BadHeader
from ..anticaptchas.ReCaptcha import ReCaptcha
from ..base.simple_downloader import SimpleDownloader
class RockfileEu(SimpleDownloader):
    """pyLoad downloader plugin for rockfile.eu / rockfile.co links."""

    __name__ = "RockfileEu"
    __type__ = "downloader"
    __version__ = "0.14"
    __status__ = "testing"
    __pyload_version__ = "0.5"
    __pattern__ = r"https?://(?:www\.)?rockfile\.(?:eu|co)/(?P<ID>\w{12}).html"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", True),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
    ]
    __description__ = """Rockfile.eu downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]

    # Page-scraping regexes consumed by the SimpleDownloader base class.
    NAME_PATTERN = r'name="fname" value="(?P<N>.+?)"'
    SIZE_PATTERN = r"var iniFileSize = (\d+)"
    WAIT_PATTERN = r'<span id="countdown_str".+?><span .+?>(\d+)</span>'
    DL_LIMIT_PATTERN = (
        r"You have to wait (?:<b>)?(.+?)(?:</b>)? until you can start another download"
    )
    OFFLINE_PATTERN = r"File Not Found"
    TEMP_OFFLINE_PATTERN = (
        r"Connection limit reached|Server error|You have reached the download limit"
    )
    LINK_FREE_PATTERN = r'href="(http://.+?\.rfservers\.eu.+?)"'
    COOKIES = [("rockfile.eu", "lang", "english")]

    def setup(self):
        """Configure per-download behaviour for this hoster."""
        self.multi_dl = True
        self.chunk_limit = 1
        self.resume_download = True

    def handle_free(self, pyfile):
        """Resolve and download a free (non-premium) link for *pyfile*.

        Submits the download forms, solves either the ReCaptcha or the
        site's positioned-span digit captcha, then follows the final link.
        """
        # First form: the "download" op form leading to the captcha page.
        url, inputs = self.parse_html_form(input_names={"op": re.compile(r"^download")})
        if inputs:
            self.data = self.load(pyfile.url, post=inputs)
            self.check_errors()
        url, inputs = self.parse_html_form('name="F1"')
        if not inputs:
            self.error("Form F1 not found")
        self.captcha = ReCaptcha(pyfile)
        captcha_key = self.captcha.detect_key()
        if captcha_key:
            response, challenge = self.captcha.challenge(captcha_key)
            inputs["recaptcha_challenge_field"] = challenge
            inputs["recaptcha_response_field"] = response
        else:
            # Fallback captcha: digits rendered as absolutely-positioned
            # <span>s; sort by CSS left padding to recover the code order.
            captcha_code = "".join(
                chr(int(x[2:4])) if x[0:2] == "&#" else x
                for _, x in sorted(
                    re.findall(
                        r'<span style=[\'"]color:#5d5d5d; text-shadow: 1px 1px #f2f2f2;.+?padding-left:(\d+)px;.+?[\'"]>(.+?)</span>',
                        self.data,
                    ),
                    key=lambda _i: int(_i[0]),
                )
            )
            if captcha_code:
                #: Remove leading zero
                captcha_code = (
                    captcha_code[1:] if captcha_code[0] == "0" else captcha_code
                )
                #: Remove leading zero
                # NOTE(review): this strip is a duplicate of the one above, so
                # up to TWO leading zeros are removed, and a code of exactly
                # "0" would IndexError on the second pass — confirm intent.
                captcha_code = (
                    captcha_code[1:] if captcha_code[0] == "0" else captcha_code
                )
                inputs["code"] = captcha_code
            else:
                self.error("Captcha not found")
        self.data = self.load(pyfile.url, post=inputs)
        if r"> Preparing download link...<" not in self.data:
            self.retry_captcha()
        else:
            self.captcha.correct()
        m = re.search(self.LINK_FREE_PATTERN, self.data)
        if m is not None:
            self.link = m.group(1)
        # Replace the placeholder "<ID>.html" name with the real filename
        # derived from the final download link.
        if self.link and pyfile.name == self.info["pattern"]["ID"] + ".html":
            pyfile.name = urllib.parse.unquote(self.link.split("/")[-1])
        try:
            self.download(self.link)
        except BadHeader as exc:
            # 503 means the server is temporarily saturated: retry later.
            if exc.code == 503:
                self.retry()
            else:
                raise
| 31.536 | 134 | 0.545916 |
a2244a48b2ebbeba27e0636e49a779ba3ba46ff4 | 1,198 | py | Python | api/app.py | georgetzianabos/jinja2online | 03894669cf816ff7b49477ac9c167916cac925c0 | [
"MIT"
] | null | null | null | api/app.py | georgetzianabos/jinja2online | 03894669cf816ff7b49477ac9c167916cac925c0 | [
"MIT"
] | 10 | 2020-09-07T07:23:08.000Z | 2022-03-02T05:32:10.000Z | api/app.py | georgetzianabos/jinja2online | 03894669cf816ff7b49477ac9c167916cac925c0 | [
"MIT"
] | null | null | null | import os
from flask import (
Flask,
request,
jsonify,
send_from_directory,
Response
)
from api import process
import api.example
app = Flask(__name__)
# Cache static assets for at most 5 minutes.
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 300
@app.route('/')
def home():
    """Serve the static single-page frontend."""
    static_dir = os.path.abspath('static/')
    return send_from_directory(static_dir, 'index.html')
@app.route('/api')
def working():
    """Health-check endpoint for the API root."""
    status = {'status': 'working'}
    return jsonify(status)
@app.route('/api/process', methods=['POST'])
def process_request():
    """Render a Jinja2 template posted as JSON.

    Expects a JSON body ``{"template": str, "values": dict}``.
    Returns ``{"result": ...}`` on success, or ``{"error": ...}`` with
    HTTP 400 when the payload is malformed or rendering fails.
    """
    def get_args():
        # request.json is None for non-JSON bodies; subscripting then raises
        # TypeError, and missing keys raise KeyError — both handled below.
        content = request.json
        template = content['template']
        values = content['values']
        if not isinstance(template, str) or not isinstance(values, dict):
            raise TypeError("template must be a string and values a dict")
        return template, values

    # Previously this raised a bare Exception and caught the broad Exception
    # class, which could mask unrelated bugs; catch only payload errors.
    try:
        args = get_args()
    except (KeyError, TypeError):
        return jsonify({"error": "Bad Request"}), 400

    try:
        result = {"result": process(*args)}
    except Exception as e:
        # e.args may be empty (e.g. Exception()); fall back to str(e)
        # instead of letting e.args[0] raise IndexError.
        return jsonify({"error": e.args[0] if e.args else str(e)}), 400
    return jsonify(result)
@app.route('/api/example')
def get_example():
    """Return the bundled example template/values payload as JSON."""
    return jsonify(api.example.get_example())
| 19.322581 | 72 | 0.583472 |
29f39df0d3782632580d887c9df64cc20b9b6537 | 639 | py | Python | 2020/day_01.py | nyanthanya/Contoh-Program | 924d79c34a92e77374228f1605a1d37b0fe37c70 | [
"Unlicense"
] | 105 | 2019-12-09T07:27:43.000Z | 2022-01-28T16:34:37.000Z | 2020/day_01.py | nyanthanya/Contoh-Program | 924d79c34a92e77374228f1605a1d37b0fe37c70 | [
"Unlicense"
] | 1 | 2021-12-11T21:25:47.000Z | 2021-12-12T21:21:35.000Z | 2020/day_01.py | nyanthanya/Contoh-Program | 924d79c34a92e77374228f1605a1d37b0fe37c70 | [
"Unlicense"
] | 9 | 2020-12-06T01:00:11.000Z | 2021-12-14T00:48:43.000Z | import aoc_helper
from itertools import combinations
raw = aoc_helper.day(1)
data = set(aoc_helper.extract_ints(raw))
def part_one():
for a in data:
if (b := (2020 - a)) in data:
return a * b
def part_two():
# We know that at least two of the numbers must be less than half the target.
# Note len(filtered_data) is 5 for my input. We only need to check 10 combinations!
filtered_data = (i for i in data if i < 1010)
for a, b in combinations(filtered_data, 2):
if (c := (2020 - a - b)) in data:
return a * b * c
aoc_helper.submit(1, part_one)
aoc_helper.submit(1, part_two)
| 27.782609 | 88 | 0.643192 |
a7d72dd3f9e812fe8208427326463a6b41d0977b | 4,097 | py | Python | tools/micavis/analysis.py | mica-gossip/MiCA | bdd4848a7f52a6744d6e61647333b0a71a9ae338 | [
"BSD-3-Clause"
] | 5 | 2015-03-03T23:59:34.000Z | 2021-03-20T11:39:33.000Z | tools/micavis/analysis.py | mica-gossip/MiCA | bdd4848a7f52a6744d6e61647333b0a71a9ae338 | [
"BSD-3-Clause"
] | null | null | null | tools/micavis/analysis.py | mica-gossip/MiCA | bdd4848a7f52a6744d6e61647333b0a71a9ae338 | [
"BSD-3-Clause"
] | null | null | null |
from math import *
import logs
def deltas(sequence):
    """Yield successive differences between the sorted values of *sequence*.

    The input is sorted first, so every yielded delta is non-negative.
    Yields nothing for inputs with fewer than two elements.
    """
    sq = sorted(sequence)
    # Pair each element with its successor. This replaces the previous
    # hand-rolled iterator loop that used the Python-2-only `it.next()`
    # protocol; `zip` works identically on Python 2 and 3.
    for prev, cur in zip(sq, sq[1:]):
        yield cur - prev
# returns a list of tuple-lists, where events have the form:
# (timestamp, src, dst)
#
# one list is returned for each leaf projection
def gossip_events(events):
    # Most recent per-leaf relevance bitmap, keyed by node address.
    buckets = {}
    gossip_total = 0
    leaves_per_gossip = 0.
    leaf_sequences = []
    dval = {'true':True,'false':False}
    def value(leafstatus):
        # Entries look like "name:true" / "name:false"; keep the boolean.
        return dval[leafstatus.split(':')[1]]
    for e in events:
        if e['event_type'] == 'merge-execute-subprotocols':
            buckets[e['address']] = [value(x) for x in e['data'].split(',')]
        if e['event_type'] == 'mica-gossip':
            # Default to a single always-relevant leaf if no bitmap seen yet.
            bk = buckets.get(e['address'],[True])
            src, dst = e['data']
            tupl = (e['timestamp'], src, dst)
            gossip_total += 1
            # Grow the output lists to match the widest bitmap seen so far.
            if len(leaf_sequences) < len(bk):
                leaf_sequences += [[] for i in xrange(len(bk) - len(leaf_sequences))]
            for i,relevant in enumerate(bk):
                if relevant:
                    leaves_per_gossip += 1
                    leaf_sequences[i].append(tupl)
    # NOTE: Python 2 module (print statement, xrange). Raises
    # ZeroDivisionError if `events` contains no 'mica-gossip' entries.
    leaves_per_gossip /= gossip_total
    print "Gossip statistics: %s top-level gossips, %s leaves per gossip, total gossip activity %s" % (gossip_total, leaves_per_gossip, leaves_per_gossip * gossip_total)
    return leaf_sequences
def compute_changes_per_round(trace, **frequency_count_keywords):
    # Yields (timestamp, address) each time a node's projected state value
    # actually changes; frequency_count then bins these per round, counting
    # each address at most once per bucket.
    def node_state_change_timestamp_generator():
        values = {}
        node_states = logs.CurrentValueTracker(
            trace.events,
            filter_func = logs.state_event_filter,
            value_func = lambda e,mv=trace: (e['address'], (e['timestamp'],mv.project(e['data']))) )
        for i,addr,(t,data) in node_states.enumerate(yield_events=True):
            if addr not in values or values[addr] != data:
                values[addr] = data
                yield t,addr
    return frequency_count(trace,
                           node_state_change_timestamp_generator(),
                           **frequency_count_keywords)
# timestamp_generator is an iterable of timestamps, OR an iterable of (timestamp,key) pairs,
# where each unique id can only increment a time bucket once:
# e.g., pairs (12039, "key1"), (12040, "key1") will only count as one if 12039 and 12040 fall into the
# same bucket
#
# return x, y lists for plotting
# x = round bucket number
# y = number of changes in bucket
def frequency_count(trace, timestamp_generator, bucket_size_ms = None, bucket_x = lambda i: i, normalize = False, bucket_scalar = 1.0, subdivisions=1):
if bucket_size_ms is None:
bucket_size_ms = trace.runtime_info.round_ms
if subdivisions > 1:
bucket_size_ms = int(float(bucket_size_ms)/subdivisions)
bucket_x = lambda i,f=bucket_x: f(float(i)/subdivisions)
bucket_scalar *= subdivisions
start_t = trace.runtime_info.first_timestamp
end_t = trace.runtime_info.last_timestamp
buckets = [0] * int(ceil(float(end_t - start_t) / bucket_size_ms))
def bucket(timestamp):
return int(floor((timestamp - start_t) / float(bucket_size_ms)))
keys = {}
for t in timestamp_generator:
try:
t,key = t
b = bucket(t)
# print str((key,b,keys.get(key,-1),keys.get(key,-1)==b, buckets[b]))
if key is not None:
if keys.get(key,-1) != b:
keys[key] = b
buckets[b] += 1
except TypeError, e: # t is a scalar
b = bucket(t)
buckets[b] += 1
if normalize:
n = float(len(trace.unique_addresses))
buckets = [v/n for v in buckets]
if bucket_scalar != 1.0:
buckets = [v*bucket_scalar for v in buckets]
x_values = [bucket_x(i) for i in xrange(len(buckets))]
return x_values, buckets
| 33.581967 | 169 | 0.592873 |
531f6060768e881be112dc678c3780bd90cd60cc | 7,799 | py | Python | las2.py | DiegoPandolfaDiaz/Lidar | 16079dc5143b4bd7400becd335d0c5c06dc97d69 | [
"Apache-2.0"
] | null | null | null | las2.py | DiegoPandolfaDiaz/Lidar | 16079dc5143b4bd7400becd335d0c5c06dc97d69 | [
"Apache-2.0"
] | null | null | null | las2.py | DiegoPandolfaDiaz/Lidar | 16079dc5143b4bd7400becd335d0c5c06dc97d69 | [
"Apache-2.0"
] | null | null | null | import liblas
import sys
import struct
import numpy as np
def main():
#coeficientes de la matriz de rotacion
a = b = c = d = e = f = g = h = i = 0.0;
matrix_rot = np.matrix( ((1,0,0),(0,1,0),(0,0,1)) )
gravity = np.matrix( ((0,0,0),(0,0,0),(9.8,0,0)) )
#posiciones
x_next = 0.0
y_next = 0.0
z_next = 0.0
x = 0.0
y = 0.0
z = 0.0
#velocidades
v_next_x = 0.0
v_next_y = 0.0
v_next_z = 0.0
v_x = 0.0
v_y = 0.0
v_z = 0.0
#aceleraciones
a_prev_x = 0.0
a_prev_y = 0.0
a_prev_z = 0.0
a_x_imu = 0.0 #esta sera la medicion de la imu
a_y_imu = 0.0
a_z_imu = 0.0
a_x = 0.0
a_y = 0.0
a_z = 0.0
#jerk
j_next_x = 0.0
j_next_y = 0.0
j_next_z = 0.0
j_x = 0.0
j_y = 0.0
j_z = 0.0
#difencial de tiempo
dt = 0.001 #de alguna manera hay que medir el tiempo entre samples del lidar
#intervalo confiable y correccion de offset
alpha = 0.0
offset_a_x = 0.0
offset_a_y = 0.0
offset_a_z = 0.0
beta = 0.0
dev_a_x = 0.0
dev_a_y = 0.0
dev_a_z = 0.0
data = ""
rotat = ""
accel = ""
data_lidar = ""
output_lidar = ""
data_lidar = ""
data_imu = ""
data_gps = ""
# for gps
lat = -33.035385
lon = -71.595649
height = 50.0
phi = lat*3.141592/180
lamda = lon*3.141592/180
semi_axis_major = 6378137.0 #metros
semi_axis_minor = 6356752.314
eccentricity = 0.08181919
Radio = semi_axis_major
x_gps = 1690023.993
y_gps = -5079111.743
z_gps = 50.0
flag_gps = False
x_min = float('inf')
x_max = float('-inf')
y_min = float('inf')
y_max = float('-inf')
z_min = float('inf')
z_max = float('-inf')
try:
logs_file = open('output.txt','r')
las_header = liblas.header.Header()
las_file = liblas.file.File('./LASFILES/output.las',header=las_header, mode='w');
for linea in logs_file:
elementos = linea.strip().split(',')
#print "holi 1"
if(len(elementos) == 3):
#print "holi 2"
data_lidar = elementos[0]
data_imu = elementos[1]
data_gps = elementos[2]
#print elementos
#print data_lidar
#print data_imu
#print data_gps
if(data_gps != ""):
#print "holi 3"
lat = float(data_imu.strip().split(' ')[3])
lon = float(data_imu.strip().split(' ')[5])
height = float(data_imu.strip().split(' ')[8])
phi = lat*3.141592/180.0
lamda = lon*3.141592/180.0
Radio = semi_axis_major/np.sqrt((1 - eccentricity*eccentricity*np.sin(phi)*np.sin(phi)))
#x_gps = (Radio + height)*np.cos(phi)*np.cos(lamda)
#y_gps = (Radio + height)*np.cos(phi)*np.sin(lamda)
#z_gps = height
flag_gps = True
if(len(data_imu) == 162):
#print "holi 4"
if(data_imu[0:4] == "faff"):
#print "holi 5"
rotat = data_imu[14:86]
accel = data_imu[-70:-46] # se parsea la aceleracion
if(len(rotat) == 72):
#print "holi 6"
a = struct.unpack('!f', rotat[0:8].decode('hex'))[0]
b = struct.unpack('!f', rotat[8:16].decode('hex'))[0]
c = struct.unpack('!f', rotat[16:24].decode('hex'))[0]
d = struct.unpack('!f', rotat[24:32].decode('hex'))[0]
e = struct.unpack('!f', rotat[32:40].decode('hex'))[0]
f = struct.unpack('!f', rotat[40:48].decode('hex'))[0]
g = struct.unpack('!f', rotat[48:56].decode('hex'))[0]
h = struct.unpack('!f', rotat[56:64].decode('hex'))[0]
i = struct.unpack('!f', rotat[64:72].decode('hex'))[0]
matrix_rot = np.matrix( ((a,b,c),(d,e,f),(g,h,i)) )
if(len(accel)==24):
#print "holi 7"
a_x_imu = struct.unpack('!f', accel[0:8].decode('hex'))[0]
a_y_imu = struct.unpack('!f', accel[8:16].decode('hex'))[0]
a_z_imu = struct.unpack('!f', accel[16:24].decode('hex'))[0]
#print "holi 8"
a_x = a_x_imu
a_y = a_y_imu
a_z = a_z_imu
#print "holi 8.3"
#print matrix_rot
a_real = matrix_rot.getI()
#print "holi 8.4"
a_real = a_real*np.matrix( ((a_x,0,0),(a_y,0,0),(a_z,0,0)) )
#print "holi 8.5"
a_x = a_real[0,0]
a_y = a_real[1,0]
a_z = a_real[2,0] - gravity[2,0]
alpha = 0.05
beta = 0.250
#print "holi 9"
if(abs(a_x) < 0.2):
offset_a_x = a_x*alpha + a_prev_x*(1-alpha)
# dev_a_x = (1-beta)*dev_a_x + beta*(abs(a_x - offset_a_x))
if(abs(a_y) < 0.2):
offset_a_y = a_y*alpha + a_prev_y*(1-alpha)
# dev_a_y = (1-beta)*dev_a_y + beta*(abs(a_y - offset_a_y))
if(abs(a_z) < 0.2):
offset_a_z = a_z*alpha + a_prev_z*(1-alpha)
# dev_a_z = (1-beta)*dev_a_z + beta*(abs(a_z - offset_a_z))
a_x -= offset_a_x
a_y -= offset_a_y
a_z -= offset_a_z
a_x = round(a_x,2)
a_y = round(a_y,2)
a_z = round(a_z,2)
j_next_x = (a_x - a_prev_x)/dt
j_next_y = (a_y - a_prev_y)/dt
j_next_z = (a_z - a_prev_z)/dt
# print "jerk : ", j_x, j_y, j_z
v_next_x = v_x + round(a_x*dt,6) + round(j_x*dt*dt*0.5,6)
v_next_y = v_y + round(a_y*dt,6) + round(j_y*dt*dt*0.5,6)
v_next_z = v_z + round(a_z*dt,6) + round(j_z*dt*dt*0.5,6)
# print "velocity :\t", v_next_x, "\t", v_next_y, "\t", v_next_z
x_next = x + round(v_x*dt,4) + round(a_x*dt*dt*0.5,4) + round(j_x*dt*dt*dt/6.0,4)
y_next = y + round(v_y*dt,4) + round(a_y*dt*dt*0.5,4) + round(j_y*dt*dt*dt/6.0,4)
z_next = z + round(v_z*dt,4) + round(a_z*dt*dt*0.5,4) + round(j_z*dt*dt*dt/6.0,4)
# print "position :\t", x_next, "\t", y_next, "\t", z_next
# asignacion del nuevo estado
a_prev_x = a_x
a_prev_y = a_y
a_prev_z = a_z
j_x = j_next_x
j_y = j_next_y
j_z = j_next_z
# if(count%250 == 0):
# v_x = 0
# v_y = 0
# v_z = 0
# else:
v_x = round(v_next_x,6)
v_y = round(v_next_y,6)
v_z = round(v_next_z,6)
x = round(x_next,6)
y = round(y_next,6)
z = round(z_next,6)
if(flag_gps):
x = y = z = 0.0
# print "real position : ", x, y, z
# print "tiempo", time.time()
#count += 1
#print "holi 10"
if(len(data_lidar) == 14):
data_angle = data_lidar[4] + data_lidar[5] + data_lidar[2] + data_lidar[3]
#print "len :\t", len(data_angle), data_angle
angle = int(data_angle,16)/16.0
distance = int((data_lidar[8] + data_lidar[9] + data_lidar[6] + data_lidar[7]),16)
signal_strength = int((data_lidar[10] + data_lidar[11]),16)
#print "angle :\t",angle, "distancia :\t", distance
if( ((angle >= 0 and angle <= 360) or (angle >= 337.5 and angle <= 360)) and distance <= 4000 ):
#print "angle :\t",angle,"cos :\t", np.sin(angle*3.141592/180.0), "distancia :\t", distance
x_lidar = distance*np.sin(angle*3.141592/180)/100
z_lidar = -distance*np.cos(angle*3.141592/180)/100
y_lidar = 0.0
#print "puntos lidar:\t", x_lidar, "\t", y_lidar, "\t", z_lidar
vector_lidar_real = matrix_rot*np.matrix( ((x_lidar,0,0),(y_lidar,0,0),(z_lidar,0,0)) )
print "orientacion:\n", matrix_rot*np.matrix(((0,0,0),(0,0,0),(1,0,0)))
#Xlas = x_gps + x + vector_lidar_real[0,0]
#Ylas = y_gps + y + vector_lidar_real[1,0]
#Zlas = z_gps + z + vector_lidar_real[2,0]
Xlas = (lon + (x + vector_lidar_real[0,0])*180/(Radio*3.141592))*3600
Ylas = (lat + (y + vector_lidar_real[1,0])*180/(Radio*3.141592))*3600
Zlas = height + z + vector_lidar_real[2,0]
if(Xlas > x_max):
x_max = Xlas
if(Xlas < x_min):
x_min = Xlas
if(Ylas > y_max):
y_max = Ylas
if(Ylas < y_min):
y_min = Ylas
if(Zlas > z_max):
z_max = Zlas
if(Zlas < z_min):
z_min = Zlas
#print "puntos:\t", Xlas, "\t", Ylas, "\t", Zlas
punto = liblas.point.Point()
punto.x = Xlas
punto.y = Ylas
punto.z = Zlas
las_file.write(punto)
flag_gps = False
las_header.min = [x_min,y_min,z_min]
las_header.max = [x_max,y_max,z_max]
las_file.close()
f = open("./LASFILES/listo.las","w")
f.close()
except ZeroDivision():
print "algo salio mal con"
exit(-1)
if __name__ == '__main__':
main()
| 27.953405 | 100 | 0.585973 |
381b8d166f8c19a9965ed4f1126dcfc0e1efcd5b | 31,188 | py | Python | kge/model/kge_model.py | beyondacm/kge | 6e1daac2541c821d1e4c28a93bcb38389222a255 | [
"MIT"
] | null | null | null | kge/model/kge_model.py | beyondacm/kge | 6e1daac2541c821d1e4c28a93bcb38389222a255 | [
"MIT"
] | null | null | null | kge/model/kge_model.py | beyondacm/kge | 6e1daac2541c821d1e4c28a93bcb38389222a255 | [
"MIT"
] | null | null | null | import importlib
import tempfile
from collections import OrderedDict
from torch import Tensor
import torch.nn
import numpy as np
import os
import kge
from kge import Config, Configurable, Dataset
from kge.misc import filename_in_module, init_from
from kge.util import load_checkpoint
from typing import Any, Dict, List, Optional, Union, Tuple
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from kge.job import Job
# Column indexes of a triple: subject, predicate (relation), object.
SLOTS = [0, 1, 2]
S, P, O = SLOTS
class KgeBase(torch.nn.Module, Configurable):
    r"""Base class for all KGE models, scorers, and embedders."""
    def __init__(self, config: Config, dataset: Dataset, configuration_key=None):
        Configurable.__init__(self, config, configuration_key)
        torch.nn.Module.__init__(self)
        self.dataset = dataset
        self.meta: Dict[str, Any] = dict() #: meta-data stored with this module
        # Maps deprecated state-dict keys (written by older versions of this
        # code) to their current names; applied when loading in load().
        self.backward_compatible_keys = {
            "_entity_embedder.embeddings.weight": "_entity_embedder._embeddings.weight",
            "_relation_embedder.embeddings.weight": "_relation_embedder._embeddings.weight",
            "_base_model._entity_embedder.embeddings.weight": "_base_model._entity_embedder._embeddings.weight",
            "_base_model._relation_embedder.embeddings.weight": "_base_model._relation_embedder._embeddings.weight",
        }
    @staticmethod
    def _initialize(what: Tensor, initialize: str, initialize_args):
        """Apply the torch.nn.init function named `initialize` to `what` in place.

        Raises ValueError (chaining the original error) if the initializer
        name or its arguments are invalid.
        """
        try:
            getattr(torch.nn.init, initialize)(what, **initialize_args)
        except Exception as e:
            raise ValueError(
                "invalid initialization options: {} with args {}".format(
                    initialize, initialize_args
                )
            ) from e
    def initialize(self, what: Tensor, config=None, configuration_key=None):
        """Initialize tensor with provided configuration.
        The initializers are taken from options "initialize" and "initialize_args".
        If set, config and configuration_key overwrite the default configuration used in
        this class. When both are set, self can be None.
        """
        if config is None:
            config = self.config
        if configuration_key is None:
            configuration_key = self.configuration_key
        configurable = Configurable(config, configuration_key)
        initialize = configurable.get_option("initialize")
        # Prefer initializer-specific arguments ("initialize_args.<name>");
        # fall back to the generic "initialize_args" key.
        try:
            initialize_args_key = "initialize_args." + initialize
            initialize_args = configurable.get_option(initialize_args_key)
        except KeyError:
            initialize_args_key = "initialize_args"
            initialize_args = configurable.get_option(initialize_args_key)
        # Automatically set arg a (lower bound) for uniform_ if not given
        if initialize == "uniform_" and "a" not in initialize_args:
            initialize_args["a"] = initialize_args["b"] * -1
            # Record the derived bound in the config so the run is reproducible.
            config.set_option(
                initialize_args_key + ".a", initialize_args["a"], log=True
            )
        KgeBase._initialize(what, initialize, initialize_args)
    def prepare_job(self, job: "Job", **kwargs):
        r"""Prepares the given job to work with this model.
        If this model does not support the specified job type, this function may raise
        an error.
        This function commonly registers hooks specific to this model. For a list of
        available hooks during training or evaluation, see :class:`TrainingJob` or
        :class:`EvaluationJob`:, respectively.
        """
    def penalty(self, **kwargs) -> List[Tensor]:
        r"""Returns additional penalty terms that are added to the loss during training.
        This method is called once per batch during training. The arguments being passed
        depend on the trainer being used.
        Returns a (possibly empty) list of penalty terms.
        """
        return []
    def save(self):
        "Returns data structure to save state"
        return (self.state_dict(), self.meta)
    def load(self, savepoint):
        "Loads state from a saved data structure"
        # handle deprecated keys
        state_dict = OrderedDict()
        for k, v in savepoint[0].items():
            state_dict[self.backward_compatible_keys.get(k, k)] = v
        self.load_state_dict(state_dict)
        self.meta = savepoint[1]
class RelationalScorer(KgeBase):
    r"""Base class for all relational scorers.
    Relational scorers take as input the embeddings of (subject, predicate,
    object)-triple and produce a score.
    Implementations of this class should either implement
    :func:`~RelationalScorer.score_emb_spo` (the quick way, but potentially inefficient)
    or :func:`~RelationalScorer.score_emb` (the hard way, potentially more efficient).
    """
    def __init__(self, config: Config, dataset: Dataset, configuration_key: str):
        super().__init__(config, dataset, configuration_key)
    def score_emb_spo(self, s_emb: Tensor, p_emb: Tensor, o_emb: Tensor) -> Tensor:
        r"""Scores a set of triples specified by their embeddings.
        `s_emb`, `p_emb`, and `o_emb` are tensors of size :math:`n\times d_e`,
        :math:`n\times d_r`, and :math:`n\times d_e`, where :math:`d_e` and
        :math:`d_r` are the sizes of the entity and relation embeddings, respectively.
        The embeddings are combined row-wise. The output is a :math`n\times 1` tensor,
        in which the :math:`i`-th entry holds the score of the embedding triple
        :math:`(s_i, p_i, o_i)`.
        """
        return self.score_emb(s_emb, p_emb, o_emb, "spo")
    def score_emb(
        self, s_emb: Tensor, p_emb: Tensor, o_emb: Tensor, combine: str
    ) -> Tensor:
        r"""Scores a set of triples specified by their embeddings.
        `s_emb`, `p_emb`, and `o_emb` are tensors of size :math:`n_s\times d_e`,
        :math:`n_p\times d_r`, and :math:`n_o\times d_e`, where :math:`d_e` and
        :math:`d_r` are the sizes of the entity and relation embeddings, respectively.
        The provided embeddings are combined based on the value of `combine`. Common
        values are :code:`"spo"`, :code:`"sp_"`, and :code:`"_po"`. Not all models may
        support all combinations.
        When `combine` is :code:`"spo"`, then embeddings are combined row-wise. In this
        case, it is required that :math:`n_s=n_p=n_o=n`. The output is identical to
        :func:`~RelationalScorer.score_emb_spo`, i.e., a :math`n\times 1` tensor, in
        which the :math:`i`-th entry holds the score of the embedding triple
        :math:`(s_i, p_i, o_i)`.
        When `combine` is :code:`"sp_"`, the subjects and predicates are taken row-wise
        and subsequently combined with all objects. In this case, it is required that
        :math:`n_s=n_p=n`. The output is a :math`n\times n_o` tensor, in which the
        :math:`(i,j)`-th entry holds the score of the embedding triple :math:`(s_i, p_i,
        o_j)`.
        When `combine` is :code:`"_po"`, predicates and objects are taken row-wise and
        subsequently combined with all subjects. In this case, it is required that
        :math:`n_p=n_o=n`. The output is a :math`n\times n_s` tensor, in which the
        :math:`(i,j)`-th entry holds the score of the embedding triple :math:`(s_j, p_i,
        o_i)`.
        """
        n = p_emb.size(0)
        if combine == "spo":
            # Row-wise scoring of n aligned triples.
            assert s_emb.size(0) == n and o_emb.size(0) == n
            out = self.score_emb_spo(s_emb, p_emb, o_emb)
        elif combine == "sp_":
            # Each of the n (s,p) pairs against every object: row i*n_o+j
            # holds (s_i, p_i, o_j).
            assert s_emb.size(0) == n
            n_o = o_emb.size(0)
            s_embs = s_emb.repeat_interleave(n_o, 0)
            p_embs = p_emb.repeat_interleave(n_o, 0)
            o_embs = o_emb.repeat((n, 1))
            out = self.score_emb_spo(s_embs, p_embs, o_embs)
        elif combine == "_po":
            # Each of the n (p,o) pairs against every subject: row i*n_s+j
            # holds (s_j, p_i, o_i).
            assert o_emb.size(0) == n
            n_s = s_emb.size(0)
            s_embs = s_emb.repeat((n, 1))
            p_embs = p_emb.repeat_interleave(n_s, 0)
            o_embs = o_emb.repeat_interleave(n_s, 0)
            out = self.score_emb_spo(s_embs, p_embs, o_embs)
        elif combine == "s_o":
            # Each of the n (s,o) pairs against every relation: row i*n_p+j
            # holds (s_i, p_j, o_i).
            n = s_emb.size(0)
            assert o_emb.size(0) == n
            n_p = p_emb.size(0)
            s_embs = s_emb.repeat_interleave(n_p, 0)
            p_embs = p_emb.repeat((n, 1))
            o_embs = o_emb.repeat_interleave(n_p, 0)
            out = self.score_emb_spo(s_embs, p_embs, o_embs)
        else:
            # BUG FIX: the closing quote used to sit after ".format(combine)",
            # so the message was the literal, uninterpolated format string.
            raise ValueError('cannot handle combine="{}"'.format(combine))
        return out.view(n, -1)
class KgeEmbedder(KgeBase):
    r"""Base class for all embedders of a fixed number of objects.
    Objects can be entities, relations, mentions, and so on.
    """
    def __init__(
        self,
        config: Config,
        dataset: Dataset,
        configuration_key: str,
        init_for_load_only=False,
    ):
        super().__init__(config, dataset, configuration_key)
        #: location of the configuration options of this embedder
        self.embedder_type: str = self.get_option("type")
        # Verify all custom options by trying to set them in a copy of this
        # configuration (quick and dirty, but works).
        try:
            custom_options = Config.flatten(config.get(self.configuration_key))
        except KeyError:
            # there are no custom options
            custom_options = {}
        if "type" in custom_options:
            del custom_options["type"]
        dummy_config = self.config.clone()
        for key, value in custom_options.items():
            try:
                dummy_config.set(self.embedder_type + "." + key, value)
            except ValueError as ve:
                raise ValueError(
                    "key {}.{} invalid or of incorrect type, message was {}".format(
                        self.configuration_key, key, ve
                    )
                )
        self.dim: int = self.get_option("dim")
    @staticmethod
    def create(
        config: Config,
        dataset: Dataset,
        configuration_key: str,
        vocab_size: int,
        init_for_load_only=False,
    ) -> "KgeEmbedder":
        """Factory method for embedder creation."""
        try:
            embedder_type = config.get_default(configuration_key + ".type")
            class_name = config.get(embedder_type + ".class_name")
        except Exception as e:
            # Narrowed from a bare "except:" (which also masked
            # KeyboardInterrupt/SystemExit) and chain the original cause.
            raise Exception(
                "Can't find {}.type in config".format(configuration_key)
            ) from e
        try:
            embedder = init_from(
                class_name,
                config.get("modules"),
                config,
                dataset,
                configuration_key,
                vocab_size,
                init_for_load_only=init_for_load_only,
            )
            return embedder
        except Exception:
            # Log context before re-raising; do not swallow the error.
            config.log(f"Failed to create embedder {embedder_type} (class {class_name}).")
            raise
    def _intersect_ids_with_pretrained_embedder(
        self, pretrained_embedder: "KgeEmbedder"
    ) -> Tuple[np.array, np.array]:
        """
        Intersect entity/relation ids of the embedder with embedderings of a pretrained
        embedder.
        Args:
            pretrained_embedder: KgeEmbedder with pre-trained embeddings
        Returns:
            self_intersection_ind: index if the intersecting entities/relations
                                   in this embedder
            pretrained_intersection_ind: index of intersecting entities/relations
                                         in the pretrained embedder
        """
        # Whether this embedder holds entities or relations is encoded in its
        # configuration key.
        if "entity_embedder" in self.configuration_key:
            self_ids = self.dataset.entity_ids()
            pretrained_ids = pretrained_embedder.dataset.entity_ids()
        elif "relation_embedder" in self.configuration_key:
            self_ids = self.dataset.relation_ids()
            pretrained_ids = pretrained_embedder.dataset.relation_ids()
        else:
            raise ValueError(
                "Can only initialize entity or relation embedder with"
                " pretrained embeddings"
            )
        _, self_intersect_ind, pretrained_intersect_ind = np.intersect1d(
            self_ids, pretrained_ids, return_indices=True
        )
        if self.get_option("pretrain.ensure_all") and not len(
            self_intersect_ind
        ) == len(self_ids):
            raise IndexError(
                "Not all embeddings could be initialized with the embeddings provided "
                "in the pre-trained model"
            )
        return self_intersect_ind, pretrained_intersect_ind
    @torch.no_grad()
    def init_pretrained(self, pretrained_embedder: "KgeEmbedder") -> None:
        """
        Initialize embedding layer with pre-trained embeddings from another embedder.
        Maps embeddings based on the entity/relation ids.
        Args:
            pretrained_embedder: KgeEmbedder with pre-trained embeddings
        Returns:
            None
        """
        raise NotImplementedError
    def forward(self, indexes: Tensor) -> Tensor:
        return self.embed(indexes)
    def embed(self, indexes: Tensor) -> Tensor:
        """Computes the embedding."""
        raise NotImplementedError
    def embed_all(self) -> Tensor:
        """Returns all embeddings."""
        raise NotImplementedError
class KgeModel(KgeBase):
    r"""Generic KGE model for KBs with a fixed set of entities and relations.
    This class uses :class:`KgeEmbedder` to associate each subject, relation, and object
    with an embedding, and a :class:`RelationalScorer` to score (subject, predicate,
    object) triples.
    """
    def __init__(
        self,
        config: Config,
        dataset: Dataset,
        scorer: Union[RelationalScorer, type],
        create_embedders=True,
        configuration_key=None,
        init_for_load_only=False,
    ):
        super().__init__(config, dataset, configuration_key)
        # TODO support different embedders for subjects and objects
        #: Embedder used for entities (both subject and objects)
        self._entity_embedder: KgeEmbedder
        #: Embedder used for relations
        self._relation_embedder: KgeEmbedder
        # create_embedders=False lets subclasses (e.g. wrapper models) set up
        # their own embedders instead.
        if create_embedders:
            self._entity_embedder = KgeEmbedder.create(
                config,
                dataset,
                self.configuration_key + ".entity_embedder",
                dataset.num_entities(),
                init_for_load_only=init_for_load_only,
            )
            #: Embedder used for relations
            num_relations = dataset.num_relations()
            self._relation_embedder = KgeEmbedder.create(
                config,
                dataset,
                self.configuration_key + ".relation_embedder",
                num_relations,
                init_for_load_only=init_for_load_only,
            )
            # When training from scratch, optionally copy embeddings from
            # checkpoints configured under "*.pretrain.model_filename".
            if not init_for_load_only:
                # load pretrained embeddings
                pretrained_entities_filename = ""
                pretrained_relations_filename = ""
                if self.has_option("entity_embedder.pretrain.model_filename"):
                    pretrained_entities_filename = self.get_option(
                        "entity_embedder.pretrain.model_filename"
                    )
                if self.has_option("relation_embedder.pretrain.model_filename"):
                    pretrained_relations_filename = self.get_option(
                        "relation_embedder.pretrain.model_filename"
                    )
                def load_pretrained_model(
                    pretrained_filename: str,
                ) -> Optional[KgeModel]:
                    # Returns None when no filename is configured.
                    if pretrained_filename != "":
                        self.config.log(
                            f"Initializing with embeddings stored in "
                            f"{pretrained_filename}"
                        )
                        checkpoint = load_checkpoint(pretrained_filename)
                        return KgeModel.create_from(checkpoint)
                    return None
                pretrained_entities_model = load_pretrained_model(
                    pretrained_entities_filename
                )
                # Load the checkpoint only once if both point to the same file.
                if pretrained_entities_filename == pretrained_relations_filename:
                    pretrained_relations_model = pretrained_entities_model
                else:
                    pretrained_relations_model = load_pretrained_model(
                        pretrained_relations_filename
                    )
                if pretrained_entities_model is not None:
                    if (
                        pretrained_entities_model.get_s_embedder()
                        != pretrained_entities_model.get_o_embedder()
                    ):
                        raise ValueError(
                            "Can only initialize with pre-trained models having "
                            "identical subject and object embeddings."
                        )
                    self._entity_embedder.init_pretrained(
                        pretrained_entities_model.get_s_embedder()
                    )
                if pretrained_relations_model is not None:
                    self._relation_embedder.init_pretrained(
                        pretrained_relations_model.get_p_embedder()
                    )
        #: Scorer
        self._scorer: RelationalScorer
        if type(scorer) == type:
            # scorer is type of the scorer to use; call its constructor
            self._scorer = scorer(
                config=config, dataset=dataset, configuration_key=self.configuration_key
            )
        else:
            self._scorer = scorer
    # overridden to also set self.model
    def _init_configuration(self, config: Config, configuration_key: Optional[str]):
        """Set self.model (the model type name) in addition to the base setup."""
        Configurable._init_configuration(self, config, configuration_key)
        if not hasattr(self, "model") or not self.model:
            if self.configuration_key:
                self.model: str = config.get(self.configuration_key + ".type")
            else:
                self.model: str = config.get("model")
                self.configuration_key = self.model
    @staticmethod
    def create(
        config: Config,
        dataset: Dataset,
        configuration_key: Optional[str] = None,
        init_for_load_only=False,
    ) -> "KgeModel":
        """Factory method for model creation."""
        try:
            if configuration_key is not None:
                model_name = config.get(configuration_key + ".type")
            else:
                model_name = config.get("model")
            class_name = config.get(model_name + ".class_name")
        except:
            # NOTE(review): bare except also masks KeyboardInterrupt/SystemExit
            # and swallows the original cause; consider "except Exception as e"
            # with "raise ... from e".
            raise Exception("Can't find {}.type in config".format(configuration_key))
        try:
            model = init_from(
                class_name,
                config.get("modules"),
                config=config,
                dataset=dataset,
                configuration_key=configuration_key,
                init_for_load_only=init_for_load_only,
            )
            model.to(config.get("job.device"))
            return model
        except:
            config.log(f"Failed to create model {model_name} (class {class_name}).")
            raise
    @staticmethod
    def create_default(
        model: Optional[str] = None,
        dataset: Optional[Union[Dataset, str]] = None,
        options: Dict[str, Any] = {},
        folder: Optional[str] = None,
    ) -> "KgeModel":
        """Utility method to create a model, including configuration and dataset.
        `model` is the name of the model (takes precedence over
        ``options["model"]``), `dataset` a dataset name or `Dataset` instance (takes
        precedence over ``options["dataset.name"]``), and options arbitrary other
        configuration options.
        If `folder` is ``None``, creates a temporary folder. Otherwise uses the
        specified folder.
        """
        # load default model config
        if model is None:
            model = options["model"]
        default_config_file = filename_in_module(kge.model, "{}.yaml".format(model))
        config = Config()
        config.load(default_config_file, create=True)
        # apply specified options
        config.set("model", model)
        if isinstance(dataset, Dataset):
            config.set("dataset.name", dataset.config.get("dataset.name"))
        elif isinstance(dataset, str):
            config.set("dataset.name", dataset)
        config.set_all(new_options=options)
        # create output folder
        if folder is None:
            config.folder = tempfile.mkdtemp(
                "{}-{}-".format(config.get("dataset.name"), config.get("model"))
            )
        else:
            config.folder = folder
        # create dataset and model
        if not isinstance(dataset, Dataset):
            dataset = Dataset.create(config)
        model = KgeModel.create(config, dataset)
        return model
    @staticmethod
    def create_from(
        checkpoint: Dict,
        dataset: Optional[Dataset] = None,
        use_tmp_log_folder=True,
        new_config: Config = None,
    ) -> "KgeModel":
        """Loads a model from a checkpoint file of a training job or a packaged model.
        If dataset is specified, associates this dataset with the model. Otherwise uses
        the dataset used to train the model.
        If `use_tmp_log_folder` is set, the logs and traces are written to a temporary
        file. Otherwise, the files `kge.log` and `trace.yaml` will be created (or
        appended to) in the checkpoint's folder.
        """
        config = Config.create_from(checkpoint)
        if new_config:
            config.load_config(new_config)
        if use_tmp_log_folder:
            import tempfile
            config.log_folder = tempfile.mkdtemp(prefix="kge-")
        else:
            config.log_folder = checkpoint["folder"]
            # Fall back to the current directory if the stored folder is gone.
            if not config.log_folder or not os.path.exists(config.log_folder):
                config.log_folder = "."
        dataset = Dataset.create_from(checkpoint, config, dataset, preload_data=False)
        model = KgeModel.create(config, dataset, init_for_load_only=True)
        model.load(checkpoint["model"])
        # Loaded models start in evaluation mode.
        model.eval()
        return model
    def prepare_job(self, job: "Job", **kwargs):
        super().prepare_job(job, **kwargs)
        self._entity_embedder.prepare_job(job, **kwargs)
        self._relation_embedder.prepare_job(job, **kwargs)
        from kge.job import TrainingOrEvaluationJob
        if isinstance(job, TrainingOrEvaluationJob):
            # Record the model's total parameter count in every epoch trace.
            def append_num_parameter(job):
                job.current_trace["epoch"]["num_parameters"] = sum(
                    map(lambda p: p.numel(), job.model.parameters())
                )
            job.post_epoch_hooks.append(append_num_parameter)
    def penalty(self, **kwargs) -> List[Tensor]:
        # Note: If the subject and object embedder are identical, embeddings may be
        # penalized twice. This is intended (and necessary, e.g., if the penalty is
        # weighted).
        # When the current batch's triples are available, pass the used indexes
        # to the embedders so index-based (weighted) penalties can be computed.
        if "batch" in kwargs and "triples" in kwargs["batch"]:
            triples = kwargs["batch"]["triples"].to(self.config.get("job.device"))
            penalty_result = super().penalty(**kwargs) + self.get_p_embedder().penalty(
                indexes=triples[:, P], **kwargs
            )
            if self.get_s_embedder() is self.get_o_embedder():
                weighted = self.get_s_embedder().get_option("regularize_args.weighted")
                entity_indexes = None
                if weighted:
                    # Subject and object columns together so each occurrence
                    # contributes to the weighting.
                    entity_indexes = torch.cat(
                        (triples[:, S].view(-1, 1), triples[:, O].view(-1, 1)), dim=1
                    )
                entity_penalty_result = self.get_s_embedder().penalty(
                    indexes=entity_indexes,
                    **kwargs,
                )
                if not weighted:
                    # backwards compatibility
                    for penalty in entity_penalty_result:
                        for p in penalty:
                            p *= 2
                penalty_result += entity_penalty_result
            else:
                penalty_result += self.get_s_embedder().penalty(
                    indexes=triples[:, S], **kwargs
                )
                penalty_result += self.get_o_embedder().penalty(
                    indexes=triples[:, O], **kwargs
                )
            return penalty_result
        else:
            penalty_result = super().penalty(**kwargs) + self.get_p_embedder().penalty(
                **kwargs
            )
            if self.get_s_embedder() is self.get_o_embedder():
                entity_penalty_result = self.get_s_embedder().penalty(**kwargs)
                # Shared embedder serves both slots, so its penalty counts twice.
                for penalty in entity_penalty_result:
                    for p in penalty:
                        p *= 2
                penalty_result += entity_penalty_result
            else:
                penalty_result += self.get_s_embedder().penalty(**kwargs)
                penalty_result += self.get_o_embedder().penalty(**kwargs)
            return penalty_result
    def get_s_embedder(self) -> KgeEmbedder:
        return self._entity_embedder
    def get_o_embedder(self) -> KgeEmbedder:
        return self._entity_embedder
    def get_p_embedder(self) -> KgeEmbedder:
        return self._relation_embedder
    def get_scorer(self) -> RelationalScorer:
        return self._scorer
    def score_spo(self, s: Tensor, p: Tensor, o: Tensor, direction=None) -> Tensor:
        r"""Compute scores for a set of triples.
        `s`, `p`, and `o` are vectors of common size :math:`n`, holding the indexes of
        the subjects, relations, and objects to score.
        `direction` may influence how scores are computed. For most models, this setting
        has no meaning. For reciprocal relations, direction must be either `"s"` or
        `"o"` (depending on what is predicted).
        Returns a vector of size :math:`n`, in which the :math:`i`-th entry holds the
        score of triple :math:`(s_i, p_i, o_i)`.
        """
        s = self.get_s_embedder().embed(s)
        p = self.get_p_embedder().embed(p)
        o = self.get_o_embedder().embed(o)
        return self._scorer.score_emb(s, p, o, combine="spo").view(-1)
    def score_sp(self, s: Tensor, p: Tensor, o: Tensor = None) -> Tensor:
        r"""Compute scores for triples formed from a set of sp-pairs and all (or a subset of the) objects.
        `s` and `p` are vectors of common size :math:`n`, holding the indexes of the
        subjects and relations to score.
        Returns an :math:`n\times E` tensor, where :math:`E` is the total number of
        known entities. The :math:`(i,j)`-entry holds the score for triple :math:`(s_i,
        p_i, j)`.
        If `o` is not None, it is a vector holding the indexes of the objects to score.
        """
        s = self.get_s_embedder().embed(s)
        p = self.get_p_embedder().embed(p)
        if o is None:
            o = self.get_o_embedder().embed_all()
        else:
            o = self.get_o_embedder().embed(o)
        return self._scorer.score_emb(s, p, o, combine="sp_")
    def score_po(self, p: Tensor, o: Tensor, s: Tensor = None) -> Tensor:
        r"""Compute scores for triples formed from a set of po-pairs and (or a subset of the) subjects.
        `p` and `o` are vectors of common size :math:`n`, holding the indexes of the
        relations and objects to score.
        Returns an :math:`n\times E` tensor, where :math:`E` is the total number of
        known entities. The :math:`(i,j)`-entry holds the score for triple :math:`(j,
        p_i, o_i)`.
        If `s` is not None, it is a vector holding the indexes of the objects to score.
        """
        if s is None:
            s = self.get_s_embedder().embed_all()
        else:
            s = self.get_s_embedder().embed(s)
        o = self.get_o_embedder().embed(o)
        p = self.get_p_embedder().embed(p)
        return self._scorer.score_emb(s, p, o, combine="_po")
    def score_so(self, s: Tensor, o: Tensor, p: Tensor = None) -> Tensor:
        r"""Compute scores for triples formed from a set of so-pairs and all (or a subset of the) relations.
        `s` and `o` are vectors of common size :math:`n`, holding the indexes of the
        subjects and objects to score.
        Returns an :math:`n\times R` tensor, where :math:`R` is the total number of
        known relations. The :math:`(i,j)`-entry holds the score for triple :math:`(s_i,
        j, o_i)`.
        If `p` is not None, it is a vector holding the indexes of the relations to score.
        """
        s = self.get_s_embedder().embed(s)
        o = self.get_o_embedder().embed(o)
        if p is None:
            p = self.get_p_embedder().embed_all()
        else:
            p = self.get_p_embedder().embed(p)
        return self._scorer.score_emb(s, p, o, combine="s_o")
    def score_sp_po(
        self, s: Tensor, p: Tensor, o: Tensor, entity_subset: Tensor = None
    ) -> Tensor:
        r"""Combine `score_sp` and `score_po`.
        `s`, `p` and `o` are vectors of common size :math:`n`, holding the indexes of
        the subjects, relations, and objects to score.
        Each sp-pair and each po-pair is scored against the entities in `entity_subset`
        (also holds indexes). If set to `entity_subset` is `None`, scores against all
        entities.
        The result is the horizontal concatenation of the outputs of
        :code:`score_sp(s,p,entity_subset)` and :code:`score_po(p,o,entity_subset)`.
        I.e., returns an :math:`n\times 2E` tensor, where :math:`E` is the size of
        `entity_subset`. For :math:`j<E`, the :math:`(i,j)`-entry holds the score for
        triple :math:`(s_i, p_i, e_j)`. For :math:`j\ge E`, the :math:`(i,j)`-entry
        holds the score for triple :math:`(e_{j-E}, p_i, o_i)`.
        """
        s = self.get_s_embedder().embed(s)
        p = self.get_p_embedder().embed(p)
        o = self.get_o_embedder().embed(o)
        if self.get_s_embedder() is self.get_o_embedder():
            # Shared entity embedder: embed the candidate entities once.
            if entity_subset is not None:
                all_entities = self.get_s_embedder().embed(entity_subset)
            else:
                all_entities = self.get_s_embedder().embed_all()
            sp_scores = self._scorer.score_emb(s, p, all_entities, combine="sp_")
            po_scores = self._scorer.score_emb(all_entities, p, o, combine="_po")
        else:
            if entity_subset is not None:
                all_objects = self.get_o_embedder().embed(entity_subset)
                all_subjects = self.get_s_embedder().embed(entity_subset)
            else:
                all_objects = self.get_o_embedder().embed_all()
                all_subjects = self.get_s_embedder().embed_all()
            sp_scores = self._scorer.score_emb(s, p, all_objects, combine="sp_")
            po_scores = self._scorer.score_emb(all_subjects, p, o, combine="_po")
        return torch.cat((sp_scores, po_scores), dim=1)
| 39.428571 | 116 | 0.598531 |
1e7d3a6ebb22f1de76b0b00d30790557174fa349 | 14,897 | py | Python | utils.py | yshanyes/Pytorch-ECG-Classifier-Cinc2020-Official | 292fcf0758d526dad204cbec5935c864b7ee9444 | [
"BSD-2-Clause"
] | 7 | 2020-09-24T03:08:56.000Z | 2022-01-12T12:51:19.000Z | utils.py | yshanyes/Pytorch-ECG-Classifier-Cinc2020-Official | 292fcf0758d526dad204cbec5935c864b7ee9444 | [
"BSD-2-Clause"
] | null | null | null | utils.py | yshanyes/Pytorch-ECG-Classifier-Cinc2020-Official | 292fcf0758d526dad204cbec5935c864b7ee9444 | [
"BSD-2-Clause"
] | 4 | 2021-01-16T10:45:13.000Z | 2021-06-22T07:03:53.000Z | # -*- coding: utf-8 -*-
'''
@time: 2019/10/1 10:20
@ author: ys
'''
import torch
import numpy as np
import time,os
from sklearn.metrics import f1_score
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from config import config
import pandas as pd
# Paths (relative to the repository root) to the challenge evaluation
# artifacts: the class-confusion reward matrix and the list of scored
# SNOMED CT diagnosis codes.
weights_file = './evaluation/weights.csv'
mapping_score_file = './evaluation/dx_mapping_scored.csv'
def is_number(x):
    """Return True if *x* parses as a float, False otherwise.

    Only ValueError is treated as "not a number"; other errors (e.g. passing
    None) propagate, matching the original contract.
    """
    try:
        float(x)
    except ValueError:
        return False
    return True
# Load a table with row and column names.
def load_table(table_file):
    """Parse a comma-separated table whose first row and first column carry
    integer labels, e.g.::

        , a, b, c
        a, 1.2, 2.3, 3.4
        b, 4.5, 5.6, 6.7
        c, 7.8, 8.9, 9.0

    Returns (row_labels, col_labels, values) where non-numeric cells become NaN.
    """
    with open(table_file, 'r') as f:
        table = [[cell.strip() for cell in line.split(',')] for line in f]

    # Validate the table dimensions.
    num_rows = len(table) - 1
    if num_rows < 1:
        raise Exception('The table {} is empty.'.format(table_file))
    num_cols = set(len(table[i]) - 1 for i in range(num_rows))
    if len(num_cols) != 1:
        raise Exception('The table {} has rows with different lengths.'.format(table_file))
    num_cols = min(num_cols)
    if num_cols < 1:
        raise Exception('The table {} is empty.'.format(table_file))

    # Labels: header row and leading column.
    rows = [int(table[0][j + 1]) for j in range(num_rows)]
    cols = [int(table[i + 1][0]) for i in range(num_cols)]

    # Numeric body; unparsable entries map to NaN.
    values = np.zeros((num_rows, num_cols))
    for i in range(num_rows):
        for j in range(num_cols):
            cell = table[i + 1][j + 1]
            values[i, j] = float(cell) if is_number(cell) else float('nan')
    return rows, cols, values
# Load weights.
def load_weights(weight_file, classes):
    """Load the reward matrix from *weight_file* and re-index it onto *classes*.

    Entries for labels absent from *classes* are dropped; the result is a
    (len(classes), len(classes)) float64 matrix, zero where no weight exists.
    """
    rows, cols, values = load_table(weight_file)
    assert (rows == cols)

    num_classes = len(classes)
    weights = np.zeros((num_classes, num_classes), dtype=np.float64)
    # Copy each weight whose row and column labels are both scored classes.
    for i, a in enumerate(rows):
        if a not in classes:
            continue
        k = classes.index(a)
        for j, b in enumerate(rows):
            if b not in classes:
                continue
            weights[k, classes.index(b)] = values[i, j]
    return weights
# Import-time side effect: read the scored SNOMED codes and the reward matrix
# from disk once, for use by compute_beta_score / compute_challenge_metric.
scored_classes = pd.read_csv(mapping_score_file)['SNOMED CT Code'].values.tolist()
weights = load_weights(weights_file,scored_classes)
# I refered https://github.com/c0nn3r/RetinaNet/blob/master/focal_loss.py
class FocalLoss2d1(nn.Module):
    """Focal loss with optional per-class weights.

    Supports 'sigmoid' (binary) logits and 'softmax' logits of shape
    (B, C, H, W). NOTE: allocates tensors with ``.cuda()``, so this module is
    CUDA-only as written.
    """

    def __init__(self, gamma=2, class_weight=None, size_average=True):
        super(FocalLoss2d1, self).__init__()
        self.gamma = gamma
        self.size_average = size_average
        self.class_weight = class_weight  # list of per-class weights (or None -> uniform)

    def forward(self, logit, target, type='sigmoid'):
        target = target.view(-1, 1).long()
        if type == 'sigmoid':
            if self.class_weight is None:
                self.class_weight = [1] * 2  # [0.5, 0.5]
            # torch.sigmoid replaces the deprecated F.sigmoid.
            prob = torch.sigmoid(logit)
            prob = prob.view(-1, 1)
            prob = torch.cat((1 - prob, prob), 1)
            select = torch.FloatTensor(len(prob), 2).zero_().cuda()
            select.scatter_(1, target, 1.)

        elif type == 'softmax':
            B, C, H, W = logit.size()
            if self.class_weight is None:
                self.class_weight = [1] * C  # [1/C]*C
            logit = logit.permute(0, 2, 3, 1).contiguous().view(-1, C)
            prob = F.softmax(logit, 1)
            select = torch.FloatTensor(len(prob), C).zero_().cuda()
            select.scatter_(1, target, 1.)

        class_weight = torch.FloatTensor(self.class_weight).cuda().view(-1, 1)
        # BUG FIX: gather from the tensor built on the previous line, not from
        # self.class_weight -- that attribute is a plain Python list, and
        # torch.gather on a list raises TypeError.
        class_weight = torch.gather(class_weight, 0, target)

        # Probability assigned to the true class, clamped for numerical safety.
        prob = (prob * select).sum(1).view(-1, 1)
        prob = torch.clamp(prob, 1e-8, 1 - 1e-8)
        batch_loss = - class_weight * (torch.pow((1 - prob), self.gamma)) * prob.log()
        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss
        return loss
class FocalLoss1(nn.Module):
    """Focal-modulated element-wise BCE-with-logits loss.

    NOTE(review): ``logpt`` below is the *positive* BCE loss value, so
    ``pt = exp(logpt) >= 1``; this differs from the textbook focal loss where
    pt is the predicted probability of the true class. The original numeric
    behavior is kept as-is.
    """

    def __init__(self, focusing_param=2, balance_param=0.25):
        # BUG FIX: the original called super(FocalLoss, self).__init__(),
        # naming the *sibling* class FocalLoss. FocalLoss1 is not a subclass
        # of FocalLoss, so every instantiation raised TypeError.
        super(FocalLoss1, self).__init__()
        self.cerition = nn.BCEWithLogitsLoss(reduction='none')  # per-element loss
        self.focusing_param = focusing_param
        self.balance_param = balance_param
        self.size_average = True

    def forward(self, output, target):
        """Return the (mean-reduced) balanced focal loss for logits vs targets."""
        logpt = self.cerition(output, target)
        pt = torch.exp(logpt)
        focal_loss = ((1 - pt) ** self.focusing_param) * logpt
        balanced_focal_loss = self.balance_param * focal_loss
        if self.size_average:
            loss = balanced_focal_loss.mean()
        else:
            loss = balanced_focal_loss
        return loss
class FocalLoss(nn.Module):
    """Binary focal loss computed directly from probabilities:

        loss = -alpha*(1-p)^gamma*log(p)*y - (1-alpha)*p^gamma*log(1-p)*(1-y)
    """

    def __init__(self, gama=10, alpha=0.5, size_average=True):
        super(FocalLoss, self).__init__()
        self.gama = gama
        self.alpha = alpha
        self.size_average = size_average

    def forward(self, output, target):
        p = output.sigmoid()
        # Clamp away from {0, 1} so the logs below cannot produce -inf/NaN.
        p = p.clamp(1e-8, 1 - 1e-8)
        focal_loss = (-self.alpha * (1 - p) ** self.gama * p.log() * target
                      - (1 - self.alpha) * p ** self.gama * (1 - p).log() * (1 - target))
        # BUG FIX: the original re-wrapped the result in
        # Variable(loss, requires_grad=True), which detached the loss from the
        # autograd graph so no gradients ever reached the model. The loss
        # already carries gradients whenever the inputs do; return it directly.
        if self.size_average:
            return focal_loss.mean()
        return focal_loss.sum()
class FocalLoss2d(nn.modules.loss._WeightedLoss):
    """Focal variant of BCE-with-logits for (Batch x Classes) multi-label targets.

    `weight` (optional tensor) is forwarded as `pos_weight` to the underlying
    binary cross-entropy.
    """

    def __init__(self, gamma=2, weight=None, size_average=None, ignore_index=-100,
                 reduce=None, reduction='mean', balance_param=0.25):
        super(FocalLoss2d, self).__init__(weight, size_average, reduce, reduction)
        self.gamma = gamma
        self.weight = weight
        self.size_average = size_average
        self.ignore_index = ignore_index
        self.balance_param = balance_param

    def forward(self, input, target):
        # inputs and targets are assumed to be BatchxClasses
        assert len(input.shape) == len(target.shape)
        assert input.size(0) == target.size(0)
        assert input.size(1) == target.size(1)

        # BUG FIX: the original wrapped self.weight in autograd.Variable, which
        # raised TypeError whenever weight was left at its default of None.
        # binary_cross_entropy_with_logits accepts pos_weight=None directly.
        logpt = - F.binary_cross_entropy_with_logits(
            input, target, pos_weight=self.weight, reduction=self.reduction
        )
        pt = torch.exp(logpt)

        # compute the loss
        focal_loss = -((1 - pt) ** self.gamma) * logpt
        balanced_focal_loss = self.balance_param * focal_loss
        return balanced_focal_loss
def compute_beta_score(labels, output, beta, num_classes, check_errors=True):
    """Compute label-weighted accuracy, F-measure, F-beta and G-beta per class
    (averaged with uniform class weights C_l), plus the challenge metric.

    Parameters
    ----------
    labels, output : binary indicator matrices, shape (num_recordings, num_classes)
    beta : recall weight in the F-beta / G-beta formulas
    num_classes : number of class columns to score
    check_errors : if True, validate that labels and output have equal length

    Returns
    -------
    (accuracy, f_measure, f_beta, g_beta, challenge_metric)

    Notes
    -----
    Uses the module-level `weights` and `scored_classes` globals for the final
    challenge-metric term. Each recording's contribution is scaled by
    1 / (number of positive labels of that recording); a recording with zero
    positive labels would divide by zero -- assumed not to occur here.
    """
    # Check inputs for errors.
    if check_errors:
        if len(output) != len(labels):
            raise Exception('Numbers of outputs and labels must be the same.')
    # Populate contingency table.
    num_recordings = len(labels)
    fbeta_l = np.zeros(num_classes)
    gbeta_l = np.zeros(num_classes)
    fmeasure_l = np.zeros(num_classes)
    accuracy_l = np.zeros(num_classes)
    f_beta = 0
    g_beta = 0
    f_measure = 0
    accuracy = 0
    # Weight function
    C_l=np.ones(num_classes);
    for j in range(num_classes):
        tp = 0
        fp = 0
        fn = 0
        tn = 0
        # Accumulate a soft contingency table: every count is weighted by the
        # reciprocal of the recording's positive-label count.
        for i in range(num_recordings):
            num_labels = np.sum(labels[i])
            if labels[i][j] and output[i][j]:
                tp += 1/num_labels
            elif not labels[i][j] and output[i][j]:
                fp += 1/num_labels
            elif labels[i][j] and not output[i][j]:
                fn += 1/num_labels
            elif not labels[i][j] and not output[i][j]:
                tn += 1/num_labels
        # Summarize contingency table. Degenerate denominators (no relevant
        # counts at all) score a perfect 1.0 by convention.
        if ((1+beta**2)*tp + (fn*beta**2) + fp):
            fbeta_l[j] = float((1+beta**2)* tp) / float(((1+beta**2)*tp) + (fn*beta**2) + fp)
        else:
            fbeta_l[j] = 1.0
        if (tp + fp + beta * fn):
            gbeta_l[j] = float(tp) / float(tp + fp + beta*fn)
        else:
            gbeta_l[j] = 1.0
        if tp + fp + fn + tn:
            accuracy_l[j] = float(tp + tn) / float(tp + fp + fn + tn)
        else:
            accuracy_l[j] = 1.0
        if 2 * tp + fp + fn:
            fmeasure_l[j] = float(2 * tp) / float(2 * tp + fp + fn)
        else:
            fmeasure_l[j] = 1.0
    # Average the per-class scores with (currently uniform) class weights.
    for i in range(num_classes):
        f_beta += fbeta_l[i]*C_l[i]
        g_beta += gbeta_l[i]*C_l[i]
        f_measure += fmeasure_l[i]*C_l[i]
        accuracy += accuracy_l[i]*C_l[i]
    f_beta = float(f_beta)/float(num_classes)
    g_beta = float(g_beta)/float(num_classes)
    f_measure = float(f_measure)/float(num_classes)
    accuracy = float(accuracy)/float(num_classes)
    # 426783006 is the SNOMED code passed as the "normal" class -- presumably
    # sinus rhythm; verify against dx_mapping_scored.csv.
    return accuracy,f_measure,f_beta,g_beta,compute_challenge_metric(weights,labels,output,scored_classes,[426783006])
# def calc_metric(y_true, y_pre, threshold=0.5):
# y_true = y_true.cpu().detach().numpy().astype(np.int)
# y_pre = y_pre.cpu().detach().numpy() > threshold
# #y_true = y_true.view(-1).cpu().detach().numpy().astype(np.int)
# #y_pre = y_pre.view(-1).cpu().detach().numpy() > threshold
# return compute_beta_score(y_true, y_pre,beta=2,num_classes=config.num_classes)
# Compute modified confusion matrix for multi-class, multi-label tasks.
def compute_modified_confusion_matrix(labels, outputs):
    """Compute a modified confusion matrix for multi-class, multi-label tasks.

    Rows correspond to labels and columns to outputs. For every recording,
    each (positive label, positive output) pair receives credit
    1 / max(#classes positive in label or output, 1).
    """
    num_recordings, num_classes = np.shape(labels)
    A = np.zeros((num_classes, num_classes))

    for rec in range(num_recordings):
        # Normalization: number of classes positive in the label and/or output.
        positive_any = np.any((labels[rec, :], outputs[rec, :]), axis=0)
        normalization = float(max(np.sum(positive_any), 1))
        # Distribute credit over every positive (label, output) pair.
        label_idx = np.flatnonzero(labels[rec, :])
        output_idx = np.flatnonzero(outputs[rec, :])
        for j in label_idx:
            for k in output_idx:
                A[j, k] += 1.0 / normalization
    return A
# Compute the evaluation metric for the Challenge.
def compute_challenge_metric(weights, labels, outputs, classes, normal_class):
    """Normalized challenge score.

    Scales the weighted confusion score so that a perfect classifier scores
    1.0 and an "always predict the normal class" classifier scores 0.0.

    Parameters
    ----------
    weights : (num_classes, num_classes) reward matrix
    labels, outputs : binary indicator matrices, shape (num_recordings, num_classes)
    classes : list of class codes aligned with the matrix columns
    normal_class : the normal-rhythm class code (bare code or 1-element list)
    """
    num_recordings, num_classes = np.shape(labels)

    # Resolve the column index of the normal class. `normal_class` may arrive
    # wrapped in a list (e.g. [426783006]); fall back to the historical
    # hard-coded index 22 when it cannot be located in `classes`.
    try:
        code = normal_class[0] if isinstance(normal_class, (list, tuple)) else normal_class
        normal_index = classes.index(code)
    except (ValueError, AttributeError):
        normal_index = 22

    # Compute the observed score.
    A = compute_modified_confusion_matrix(labels, outputs)
    observed_score = np.nansum(weights * A)

    # Compute the score for the model that always chooses the correct label(s).
    correct_outputs = labels
    A = compute_modified_confusion_matrix(labels, correct_outputs)
    correct_score = np.nansum(weights * A)

    # Compute the score for the model that always chooses the normal class.
    # BUG FIX: np.bool was removed in NumPy 1.24; the builtin bool is the
    # drop-in replacement for the alias.
    inactive_outputs = np.zeros((num_recordings, num_classes), dtype=bool)
    inactive_outputs[:, normal_index] = 1
    A = compute_modified_confusion_matrix(labels, inactive_outputs)
    inactive_score = np.nansum(weights * A)

    if correct_score != inactive_score:
        normalized_score = float(observed_score - inactive_score) / float(correct_score - inactive_score)
    else:
        normalized_score = float('nan')
    return normalized_score
def calc_metric(y_true, y_pre, threshold=0.5):
    """Binarize sigmoid outputs at `threshold` and score them with
    compute_beta_score (beta=2, config.num_classes classes)."""
    # BUG FIX: np.int was removed in NumPy 1.24; it was a plain alias for the
    # builtin int, which is the exact replacement.
    y_true = y_true.cpu().detach().numpy().astype(int)
    y_pre = y_pre.cpu().detach().numpy() > threshold
    return compute_beta_score(y_true, y_pre, beta=2, num_classes=config.num_classes)
def mkdirs(path):
    """Create directory `path` (including parents) if it does not exist.

    Uses exist_ok=True, which also closes the check-then-create race of the
    original os.path.exists + os.makedirs sequence.
    """
    os.makedirs(path, exist_ok=True)
# Compute F1 score
def calc_f1(y_true, y_pre, threshold=0.5):
    """Flatten the tensors, binarize predictions at `threshold`, and return
    the (binary-average) F1 score."""
    # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int it aliased.
    y_true = y_true.view(-1).cpu().detach().numpy().astype(int)
    y_pre = y_pre.view(-1).cpu().detach().numpy() > threshold
    return f1_score(y_true, y_pre)
# Compute F1 score (returns truth, probabilities and the micro-averaged score)
def re_calc_f1(y_true, y_pre, threshold=0.5):
    """Return (truth array, probability array, micro-averaged F1 at `threshold`)."""
    # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int it aliased.
    y_true = y_true.cpu().detach().numpy().astype(int)
    y_prob = y_pre.cpu().detach().numpy()
    y_pre = y_prob > threshold  # * (y_true.shape[0]//34)).astype(np.int)
    return y_true, y_prob, f1_score(y_true, y_pre, average='micro')
def fbeta(true_label, prediction):
    """Return the micro-averaged F1 score between binary indicator arrays.

    ('micro' was chosen over 'macro'/'weighted'/'samples'.)
    """
    from sklearn.metrics import f1_score
    score = f1_score(true_label, prediction, average='micro')
    return score
def optimise_f1_thresholds_fast(y, p, iterations=20, verbose=True, num_classes=34):
    """Per-class threshold search maximizing micro-F1.

    Each class is optimized independently against a baseline of 0.2 for all
    other classes; thresholds are sampled on a grid of `iterations` points in
    [0, 1). Returns the list of best thresholds.
    """
    best_threshold = [0.2] * num_classes
    for cls in range(num_classes):
        best_score = 0
        candidate = [0.2] * num_classes  # fresh baseline for every class
        for step in range(iterations):
            value = step / float(iterations)
            candidate[cls] = value
            score = fbeta(y, p > candidate)
            if score > best_score:
                best_score = score
                best_threshold[cls] = value
        if verbose:
            print(cls, best_score, best_threshold[cls])
    return best_threshold
# Format elapsed wall-clock time
def print_time_cost(since):
    """Return the wall-clock time elapsed since `since`, formatted 'XmYs\\n'."""
    elapsed = time.time() - since
    minutes, seconds = divmod(elapsed, 60)
    return '{:.0f}m{:.0f}s\n'.format(minutes, seconds)
# Adjust the learning rate
def adjust_learning_rate(optimizer, lr):
    """Set `lr` on every parameter group of `optimizer` and return it."""
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
# Multi-label loss with per-class weights
class WeightedMultilabel(nn.Module):
    """BCE-with-logits loss where each class contributes with its own weight."""

    def __init__(self, weights: torch.Tensor):
        super(WeightedMultilabel, self).__init__()
        self.weights = weights
        self.cerition = nn.BCEWithLogitsLoss(reduction='none')

    def forward(self, outputs, targets):
        per_element = self.cerition(outputs, targets)
        return (per_element * self.weights).mean()
class Multilabel(nn.Module):
    """Multi-label one-versus-all loss (nn.MultiLabelSoftMarginLoss) with
    targets cast to long."""

    def __init__(self):
        super(Multilabel, self).__init__()
        self.cerition = nn.MultiLabelSoftMarginLoss()

    def forward(self, outputs, targets):
        return self.cerition(outputs, targets.long())
| 34.483796 | 132 | 0.621467 |
7b82f025a3dbed0a80314acfd5e6ce79a2d6b8b9 | 1,857 | py | Python | benchmark_cfpq/conftest.py | Pogozhelskaya/formal-languages-practice | 71d28a8398ec3f8c8f74bde8164bea0d54956e13 | [
"Apache-2.0"
] | null | null | null | benchmark_cfpq/conftest.py | Pogozhelskaya/formal-languages-practice | 71d28a8398ec3f8c8f74bde8164bea0d54956e13 | [
"Apache-2.0"
] | 1 | 2020-11-26T10:35:22.000Z | 2020-11-26T10:35:22.000Z | benchmark_cfpq/conftest.py | Pogozhelskaya/formal-languages-practice | 71d28a8398ec3f8c8f74bde8164bea0d54956e13 | [
"Apache-2.0"
] | null | null | null | import os
import shutil
from glob import glob
from pathlib import Path
import pytest
from src.cfg_algorithms import hellings, mxm_cfpq, tensor_cfg_cfpq, tensor_rsa_cfpq
# Benchmark directory layout: results go to ./results and the graphs/grammars
# live in ./myDataForCFPQ, unpacked on demand from the bundled tar.xz archive.
cwd = './benchmark_cfpq'
results_dir = cwd + '/results'
data_for_cfpq_dir = cwd + '/myDataForCFPQ'
# Import-time side effect: create the working directories on first use.
if not os.path.exists(data_for_cfpq_dir):
    os.mkdir(data_for_cfpq_dir)
if not os.path.exists(results_dir):
    os.mkdir(results_dir)
# Populate the data directory from the archive if it is empty.
if len(os.listdir(data_for_cfpq_dir)) == 0:
    shutil.unpack_archive(data_for_cfpq_dir + '.tar.xz', cwd)
# Cartesian product of every CFPQ algorithm with every (graph, grammar) pair
# found on disk; each suite dict carries both the objects under test and the
# display names used for pytest ids and marks.
suites = [
    {
        'id': f'algo={algo.__name__},graph={graph.split("/")[-3]},graph_file={Path(graph).stem},grammar={Path(grammar).stem}'
        , 'algo': algo
        , 'algo_name': algo.__name__
        , 'graph': graph
        , 'graph_name': graph.split("/")[-3]
        , 'graph_filename': Path(graph).stem
        , 'grammar': grammar
        , 'grammar_name': Path(grammar).stem
    }
    for algo in [
        hellings
        , mxm_cfpq
        , tensor_cfg_cfpq
        , tensor_rsa_cfpq
    ]
    for graph in glob(f'{data_for_cfpq_dir}/*/graphs/*')
    for grammar in glob(f'{data_for_cfpq_dir}/{graph.split("/")[-3]}/grammars/*')
]
# Materialize the suites as pytest params; each case is tagged with marks for
# its algorithm, graph set, grammar and graph file, so subsets can be selected
# with `pytest -m <mark>`.
params = [
    pytest.param(
        {'algo': x['algo'], 'name': x['algo_name']}
        , {'graph': x['graph'], 'name': x['graph_name'], 'filename': x['graph_filename']}
        , {'grammar': x['grammar'], 'name': x['grammar_name']}
        , marks=[
            getattr(pytest.mark, x['algo_name'])
            , getattr(pytest.mark, x['graph_name'])
            , getattr(pytest.mark, x['grammar_name'])
            , getattr(pytest.mark, x['graph_filename'])
        ]
        , id=x['id']
    )
    for x in suites
]
def pytest_configure(config):
    """Register every generated mark so pytest does not warn about unknown markers."""
    all_marks = (mark for param in params for mark in param.marks)
    for mark in all_marks:
        config.addinivalue_line('markers', f'{mark}: generated marker')
| 28.569231 | 125 | 0.605816 |
4248f532341a2754b3a4e176de1c641630be2836 | 1,688 | py | Python | src/brouwers/forum_tools/forms/widgets.py | modelbrouwers/modelbrouwers | e0ba4819bf726d6144c0a648fdd4731cdc098a52 | [
"MIT"
] | 6 | 2015-03-03T13:23:07.000Z | 2021-12-19T18:12:41.000Z | src/brouwers/forum_tools/forms/widgets.py | modelbrouwers/modelbrouwers | e0ba4819bf726d6144c0a648fdd4731cdc098a52 | [
"MIT"
] | 95 | 2015-02-07T00:55:39.000Z | 2022-02-08T20:22:05.000Z | src/brouwers/forum_tools/forms/widgets.py | modelbrouwers/modelbrouwers | e0ba4819bf726d6144c0a648fdd4731cdc098a52 | [
"MIT"
] | 2 | 2016-03-22T16:53:26.000Z | 2019-02-09T22:46:04.000Z | from django import forms
from django.conf import settings
from django.contrib.sites.models import Site
from django.forms.utils import flatatt
from django.utils.encoding import force_text
from django.utils.html import format_html, smart_urlquote
from django.utils.http import urlencode
from django.utils.translation import ugettext as _
class ForumToolsIDFieldWidget(forms.TextInput):
    """Text input for a phpBB topic/forum id.

    Renders the stored id as its public viewtopic.php/viewforum.php URL and,
    when a value is present, shows a clickable link to the current target
    above the input.
    """

    def __init__(self, urlparam=None, type_=None, **kwargs):
        assert urlparam is not None
        assert type_ in ["topic", "forum"]  # viewtopic.php, viewforum.php
        self.urlparam = urlparam
        self.type_ = type_
        super().__init__(**kwargs)

    def render(self, name, value, attrs=None, renderer=None):
        # BUG FIX / compatibility: Django >= 2.1 calls widgets as
        # render(name, value, attrs, renderer); accept and forward the
        # `renderer` argument (the None default keeps old call sites working).
        if value:
            value = self.get_url(value)
        html = super().render(name, value, attrs, renderer=renderer)
        if value:
            value = force_text(value)
            final_attrs = {"href": smart_urlquote(value)}
            html = format_html(
                '<p class="url">{0} <a{1}>{2}</a><br />{3} {4}</p>',
                _("Currently:"),
                flatatt(final_attrs),
                value,
                _("Change:"),
                html,
            )
        return html

    def get_url(self, value):
        """Map a stored id to its forum URL; values that are not integers are
        assumed to already be URLs and are passed through unchanged."""
        if not value:
            return None
        try:
            int(value)
        except ValueError:  # we're dealing with the url itself
            return value
        return "{scheme}://{domain}{prefix}/view{type}.php?{qs}".format(
            scheme="http",
            domain=Site.objects.get_current().domain,
            prefix=settings.PHPBB_URL,
            type=self.type_,
            qs=urlencode({self.urlparam: value}),
        )
| 33.76 | 74 | 0.583531 |
ab151f49f8c19ebdec5bf99c4e9359b3404661ad | 10,795 | py | Python | gtfspy/routing/profile_block_analyzer.py | Leo-Ryu/gtfspy | 732abdf6bfb6427454ac4c0a676dc3f8fc838cf4 | [
"MIT"
] | 118 | 2017-03-14T11:17:54.000Z | 2022-03-31T07:46:31.000Z | gtfspy/routing/profile_block_analyzer.py | Leo-Ryu/gtfspy | 732abdf6bfb6427454ac4c0a676dc3f8fc838cf4 | [
"MIT"
] | 27 | 2017-05-02T12:39:36.000Z | 2020-03-24T18:29:20.000Z | gtfspy/routing/profile_block_analyzer.py | Leo-Ryu/gtfspy | 732abdf6bfb6427454ac4c0a676dc3f8fc838cf4 | [
"MIT"
] | 29 | 2017-08-21T15:22:41.000Z | 2022-03-13T07:27:52.000Z | from collections import defaultdict
import numpy
from gtfspy.routing.profile_block import ProfileBlock
class ProfileBlockAnalyzer:
    """Summary statistics (mean/median/min/max), CDF/PDF and plotting helpers
    over a temporal-distance profile given as a contiguous sequence of
    ProfileBlocks."""

    def __init__(self, profile_blocks, cutoff_distance=None, **kwargs):
        """
        Parameters
        ----------
        profile_blocks: list[gtfspy.routing.profile_block.ProfileBlock]
        """
        # Blocks must partition time contiguously, and within a block the
        # temporal distance may only decrease (distance_start >= distance_end).
        for i, block in enumerate(profile_blocks[:-1]):
            assert block.start_time < block.end_time
            assert block.end_time == profile_blocks[i + 1].start_time
            assert block.distance_start >= block.distance_end
        self._profile_blocks = profile_blocks
        self._start_time = profile_blocks[0].start_time
        self._end_time = profile_blocks[-1].end_time
        self._cutoff_distance = cutoff_distance
        if cutoff_distance is not None:
            self._apply_cutoff(cutoff_distance)
        # Optional endpoint identifiers, picked up from kwargs if provided.
        self.from_stop_I = None
        self.to_stop_I = None
        for key, value in kwargs.items():
            if key == "from_stop_I":
                self.from_stop_I = value
            if key == "to_stop_I":
                self.to_stop_I = value

    def _apply_cutoff(self, cutoff_distance):
        # Clip the profile so no temporal distance exceeds cutoff_distance,
        # splitting sloped blocks at the point where they cross the cutoff.
        for block in list(self._profile_blocks):
            block_max = max(block.distance_start, block.distance_end)
            if block_max > cutoff_distance:
                print("applying cutoff")
                blocks = []
                # Flat block, or block entirely above the cutoff: replace it
                # with a flat block pinned at the cutoff value.
                if block.distance_start == block.distance_end or \
                        (block.distance_start > cutoff_distance and block.distance_end > cutoff_distance):
                    blocks.append(
                        ProfileBlock(distance_end=cutoff_distance,
                                     distance_start=cutoff_distance,
                                     start_time=block.start_time,
                                     end_time=block.end_time)
                    )
                else:
                    # NOTE(review): this guard asserts the negation of its own
                    # condition, so it raises AssertionError whenever the
                    # branch is taken; it looks like leftover debugging code.
                    if (block.distance_end >= cutoff_distance):
                        assert (block.distance_end < cutoff_distance)
                    # Linear interpolation of the time at which the sloped
                    # block crosses the cutoff distance.
                    split_point_x = block.start_time + (block.distance_start - cutoff_distance) / (
                        block.distance_start - block.distance_end) * block.width()
                    if block.distance_start > block.distance_end:
                        start_distance = cutoff_distance
                        end_distance = block.distance_end
                    else:
                        start_distance = block.distance_start
                        end_distance = cutoff_distance
                    first_block = ProfileBlock(block.start_time, split_point_x, start_distance, cutoff_distance)
                    second_block = ProfileBlock(split_point_x, block.end_time, cutoff_distance, end_distance)
                    blocks.append(first_block)
                    blocks.append(second_block)
                # Splice the replacement block(s) in place of the original.
                index = self._profile_blocks.index(block)
                self._profile_blocks[index:index + 1] = blocks

    def mean(self):
        """Time-averaged temporal distance (total block area / total width)."""
        total_width = self._profile_blocks[-1].end_time - self._profile_blocks[0].start_time
        total_area = sum([block.area() for block in self._profile_blocks])
        return total_area / total_width

    def median(self):
        """Median temporal distance, obtained by inverting the CDF at 0.5;
        returns inf when the CDF is unavailable or never reaches 0.5."""
        try:
            distance_split_points_ordered, norm_cdf = self._temporal_distance_cdf()
        except RuntimeError as e:
            return float('inf')
        if len(distance_split_points_ordered) == 0:
            return float('inf')
        left = numpy.searchsorted(norm_cdf, 0.5, side="left")
        right = numpy.searchsorted(norm_cdf, 0.5, side="right")
        if left == len(norm_cdf):
            return float('inf')
        elif left == right:
            # 0.5 falls strictly between two CDF samples: interpolate linearly.
            left_cdf_val = norm_cdf[right - 1]
            right_cdf_val = norm_cdf[right]
            delta_y = right_cdf_val - left_cdf_val
            assert (delta_y > 0)
            delta_x = (distance_split_points_ordered[right] - distance_split_points_ordered[right - 1])
            median = (0.5 - left_cdf_val) / delta_y * delta_x + distance_split_points_ordered[right - 1]
            return median
        else:
            # The CDF hits exactly 0.5 at a sample point.
            return distance_split_points_ordered[left]

    def min(self):
        """Smallest temporal distance reached anywhere in the profile."""
        return min([min(block.distance_end, block.distance_start) for block in self._profile_blocks])

    def max(self):
        """Largest temporal distance reached anywhere in the profile (may be inf)."""
        return max([max(block.distance_end, block.distance_start) for block in self._profile_blocks])

    def largest_finite_distance(self):
        """
        Compute the maximum temporal distance.
        Returns
        -------
        max_temporal_distance : float
        """
        block_start_distances = [block.distance_start for block in self._profile_blocks if
                                 block.distance_start < float('inf')]
        block_end_distances = [block.distance_end for block in self._profile_blocks if
                               block.distance_end < float('inf')]
        distances = block_start_distances + block_end_distances
        if len(distances) > 0:
            return max(distances)
        else:
            return None

    def summary_as_dict(self):
        """Dict of max/min/mean/median, plus endpoint ids when present.

        NOTE: the hasattr checks are always true because __init__ sets both
        attributes unconditionally.
        """
        summary = {"max": self.max(),
                   "min": self.min(),
                   "mean": self.mean(),
                   "median": self.median()}
        if hasattr(self, "from_stop_I"):
            summary['from_stop_I'] = self.from_stop_I
        if hasattr(self, "to_stop_I"):
            summary['to_stop_I'] = self.to_stop_I
        return summary

    def _temporal_distance_cdf(self):
        """
        Temporal distance cumulative density function.
        Returns
        -------
        x_values: numpy.array
            values for the x-axis
        cdf: numpy.array
            cdf values
        """
        # Collect every distance at which a (finite) block starts or ends.
        distance_split_points = set()
        for block in self._profile_blocks:
            if block.distance_start != float('inf'):
                distance_split_points.add(block.distance_end)
                distance_split_points.add(block.distance_start)
        distance_split_points_ordered = numpy.array(sorted(list(distance_split_points)))
        temporal_distance_split_widths = distance_split_points_ordered[1:] - distance_split_points_ordered[:-1]
        trip_counts = numpy.zeros(len(temporal_distance_split_widths))
        # Flat blocks put a point mass ("delta peak") at a single distance.
        delta_peaks = defaultdict(lambda: 0)
        for block in self._profile_blocks:
            if block.distance_start == block.distance_end:
                delta_peaks[block.distance_end] += block.width()
            else:
                # Sloped blocks contribute uniformly to every distance
                # interval between their end and start distances.
                start_index = numpy.searchsorted(distance_split_points_ordered, block.distance_end)
                end_index = numpy.searchsorted(distance_split_points_ordered, block.distance_start)
                trip_counts[start_index:end_index] += 1
        unnormalized_cdf = numpy.array([0] + list(numpy.cumsum(temporal_distance_split_widths * trip_counts)))
        # Sanity check: the accumulated mass must equal the total time span
        # minus the mass concentrated in the delta peaks.
        if not (numpy.isclose(
                [unnormalized_cdf[-1]],
                [self._end_time - self._start_time - sum(delta_peaks.values())], atol=1E-4
        ).all()):
            print(unnormalized_cdf[-1], self._end_time - self._start_time - sum(delta_peaks.values()))
            raise RuntimeError("Something went wrong with cdf computation!")
        # Inject each finite delta peak as a vertical jump in the CDF.
        if len(delta_peaks) > 0:
            for peak in delta_peaks.keys():
                if peak == float('inf'):
                    continue
                index = numpy.nonzero(distance_split_points_ordered == peak)[0][0]
                unnormalized_cdf = numpy.insert(unnormalized_cdf, index, unnormalized_cdf[index])
                distance_split_points_ordered = numpy.insert(distance_split_points_ordered, index,
                                                             distance_split_points_ordered[index])
                # walk_waiting_time_fraction = walk_total_time / (self.end_time_dep - self.start_time_dep)
                unnormalized_cdf[(index + 1):] = unnormalized_cdf[(index + 1):] + delta_peaks[peak]
        # Normalize; mass at infinity keeps the CDF below 1.
        norm_cdf = unnormalized_cdf / (unnormalized_cdf[-1] + delta_peaks[float('inf')])
        return distance_split_points_ordered, norm_cdf

    def _temporal_distance_pdf(self):
        """
        Temporal distance probability density function.
        Returns
        -------
        non_delta_peak_split_points: numpy.array
        non_delta_peak_densities: numpy.array
            len(density) == len(temporal_distance_split_points_ordered) -1
        delta_peak_loc_to_probability_mass : dict
        """
        temporal_distance_split_points_ordered, norm_cdf = self._temporal_distance_cdf()
        delta_peak_loc_to_probability_mass = {}
        non_delta_peak_split_points = [temporal_distance_split_points_ordered[0]]
        non_delta_peak_densities = []
        # Differentiate the CDF; zero-width intervals are the delta peaks.
        for i in range(0, len(temporal_distance_split_points_ordered) - 1):
            left = temporal_distance_split_points_ordered[i]
            right = temporal_distance_split_points_ordered[i + 1]
            width = right - left
            prob_mass = norm_cdf[i + 1] - norm_cdf[i]
            if width == 0.0:
                delta_peak_loc_to_probability_mass[left] = prob_mass
            else:
                non_delta_peak_split_points.append(right)
                non_delta_peak_densities.append(prob_mass / float(width))
        assert (len(non_delta_peak_densities) == len(non_delta_peak_split_points) - 1)
        return numpy.array(non_delta_peak_split_points), \
               numpy.array(non_delta_peak_densities), delta_peak_loc_to_probability_mass

    def get_vlines_and_slopes_for_plotting(self):
        """Return (vertical_lines, slopes) as dicts of x/y pairs, suitable for
        drawing the stepped profile with matplotlib-style plot calls."""
        vertical_lines = []
        slopes = []
        for i, block in enumerate(self._profile_blocks):
            distance_end_minutes = block.distance_end
            distance_start_minutes = block.distance_start
            slope = dict(x=[block.start_time, block.end_time],
                         y=[distance_start_minutes, distance_end_minutes])
            slopes.append(slope)
            if i != 0:
                # no vertical line for the first observation
                previous_duration_minutes = self._profile_blocks[i - 1].distance_end
                vertical_lines.append(dict(x=[block.start_time, block.start_time],
                                           y=[previous_duration_minutes, distance_start_minutes]))
        return vertical_lines, slopes

    def get_blocks(self):
        """Return the (possibly cutoff-adjusted) list of profile blocks."""
        return self._profile_blocks

    def interpolate(self, time):
        """Temporal distance at `time`, interpolated within its block."""
        assert(self._start_time <= time <= self._end_time)
        for profile_block in self._profile_blocks:
            # find the first block whose end time is larger than or equal to that of the queried time
            if profile_block.end_time >= time:
                return profile_block.interpolate(time)
| 44.241803 | 112 | 0.612506 |
ab2d4b40fa4f5006947c014311ae509badd9c227 | 6,432 | py | Python | monai/apps/deepedit/transforms.py | finalelement/MONAI | 8e8e1b391fa649d1227087164dba208008d00bc4 | [
"Apache-2.0"
] | null | null | null | monai/apps/deepedit/transforms.py | finalelement/MONAI | 8e8e1b391fa649d1227087164dba208008d00bc4 | [
"Apache-2.0"
] | null | null | null | monai/apps/deepedit/transforms.py | finalelement/MONAI | 8e8e1b391fa649d1227087164dba208008d00bc4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from typing import Dict, Hashable, Mapping, Tuple
import numpy as np
from monai.config import KeysCollection
from monai.transforms.transform import MapTransform, Randomizable, Transform
from monai.utils import optional_import
logger = logging.getLogger(__name__)
distance_transform_cdt, _ = optional_import("scipy.ndimage.morphology", name="distance_transform_cdt")
class DiscardAddGuidanced(MapTransform):
    """Randomly discard the positive/negative guidance channels, or add the
    two (zeroed) channels for inference."""

    def __init__(self, keys: KeysCollection, probability: float = 1.0, allow_missing_keys: bool = False):
        """
        Discard positive and negative points randomly or Add the two channels for inference time
        :param probability: Discard probability; For inference it will be always 1.0
        """
        super().__init__(keys, allow_missing_keys)
        self.probability = probability

    def _apply(self, image):
        # Always discard at probability >= 1.0; otherwise draw a coin flip.
        discard = self.probability >= 1.0 or np.random.choice(
            [True, False], p=[self.probability, 1 - self.probability]
        )
        if discard:
            blank = np.zeros((1, image.shape[-3], image.shape[-2], image.shape[-1]), dtype=np.float32)
            if image.shape[0] == 3:
                # Guidance channels already present: zero them out.
                image[1] = blank
                image[2] = blank
            else:
                # No guidance channels yet: append two zeroed ones.
                image = np.concatenate((image, blank, blank), axis=0)
        return image

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        d: Dict = dict(data)
        for key in self.key_iterator(d):
            if key != "image":
                print("This transform only applies to the image")
                continue
            d[key] = self._apply(d[key])
        return d
class ResizeGuidanceCustomd(Transform):
    """
    Resize the guidance based on cropped vs resized image.
    """

    def __init__(self, guidance: str, ref_image: str) -> None:
        self.guidance = guidance
        self.ref_image = ref_image

    def __call__(self, data):
        d = dict(data)
        # Scale factor between the current (resized) image and the original
        # spatial dimensions recorded in the image meta dict.
        resized_shape = d[self.ref_image].shape[1:]
        original_shape = d["image_meta_dict"]["dim"][1:4]
        scale = np.divide(resized_shape, original_shape)

        def _rescale(clicks):
            # Empty click lists stay empty; others are scaled and truncated to int.
            if len(clicks):
                return np.multiply(clicks, scale).astype(int).tolist()
            return []

        d[self.guidance] = [_rescale(d["foreground"]), _rescale(d["background"])]
        return d
class ClickRatioAddRandomGuidanced(Randomizable, Transform):
    """
    Add random guidance based on discrepancies that were found between label and prediction.
    Args:
        guidance: key to guidance source, shape (2, N, # of dim)
        discrepancy: key that represents discrepancies found between label and prediction, shape (2, C, D, H, W) or (2, C, H, W)
        probability: key that represents click/interaction probability, shape (1)
        fn_fp_click_ratio: ratio of clicks between FN and FP
    """

    def __init__(
        self,
        guidance: str = "guidance",
        discrepancy: str = "discrepancy",
        probability: str = "probability",
        fn_fp_click_ratio: Tuple[float, float] = (1.0, 1.0),
    ):
        self.guidance = guidance
        self.discrepancy = discrepancy
        self.probability = probability
        self.fn_fp_click_ratio = fn_fp_click_ratio
        # Set per call by randomize(): whether a click is simulated this round.
        self._will_interact = None

    def randomize(self, data=None):
        # Bernoulli draw with the per-sample interaction probability.
        probability = data[self.probability]
        self._will_interact = self.R.choice([True, False], p=[probability, 1.0 - probability])

    def find_guidance(self, discrepancy):
        # Sample one voxel inside the discrepancy region; voxels deeper inside
        # (larger chamfer distance) are exponentially more likely to be picked.
        distance = distance_transform_cdt(discrepancy).flatten()
        probability = np.exp(distance) - 1.0
        idx = np.where(discrepancy.flatten() > 0)[0]
        if np.sum(discrepancy > 0) > 0:
            seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx]))
            dst = distance[seed]
            g = np.asarray(np.unravel_index(seed, discrepancy.shape)).transpose().tolist()[0]
            # First coordinate is replaced by the distance value at the seed.
            g[0] = dst[0]
            return g
        return None

    def add_guidance(self, discrepancy, will_interact):
        # Returns a (positive_click, negative_click) pair; each side is None
        # when no click is placed there this round.
        if not will_interact:
            return None, None
        pos_discr = discrepancy[0]
        neg_discr = discrepancy[1]
        can_be_positive = np.sum(pos_discr) > 0
        can_be_negative = np.sum(neg_discr) > 0
        # Choose FN (positive) vs FP (negative) according to the click ratio.
        pos_prob = self.fn_fp_click_ratio[0] / (self.fn_fp_click_ratio[0] + self.fn_fp_click_ratio[1])
        neg_prob = self.fn_fp_click_ratio[1] / (self.fn_fp_click_ratio[0] + self.fn_fp_click_ratio[1])
        correct_pos = self.R.choice([True, False], p=[pos_prob, neg_prob])
        if can_be_positive and not can_be_negative:
            return self.find_guidance(pos_discr), None
        if not can_be_positive and can_be_negative:
            return None, self.find_guidance(neg_discr)
        if correct_pos and can_be_positive:
            return self.find_guidance(pos_discr), None
        if not correct_pos and can_be_negative:
            return None, self.find_guidance(neg_discr)
        return None, None

    def _apply(self, guidance, discrepancy):
        # Guidance may arrive as ndarray / JSON string / list; normalize first.
        guidance = guidance.tolist() if isinstance(guidance, np.ndarray) else guidance
        guidance = json.loads(guidance) if isinstance(guidance, str) else guidance
        pos, neg = self.add_guidance(discrepancy, self._will_interact)
        # Each click appended to its own list is mirrored with a [-1, ...]
        # placeholder in the other list to keep both lists the same length.
        if pos:
            guidance[0].append(pos)
            guidance[1].append([-1] * len(pos))
        if neg:
            guidance[0].append([-1] * len(neg))
            guidance[1].append(neg)
        return json.dumps(np.asarray(guidance).astype(int).tolist())

    def __call__(self, data):
        d = dict(data)
        guidance = d[self.guidance]
        discrepancy = d[self.discrepancy]
        self.randomize(data)
        d[self.guidance] = self._apply(guidance, discrepancy)
        return d
| 37.835294 | 128 | 0.650964 |
72aabd7ef33ba39ea61d865de066b8584528969e | 756 | py | Python | Code/py3/archived/1-50/15.py | ApocalypseMac/Leetcode | 84c229eaf5a2e617ca00cabed04dd76d508d60b8 | [
"MIT"
] | 1 | 2020-12-03T13:00:38.000Z | 2020-12-03T13:00:38.000Z | Code/py3/archived/1-50/15.py | ApocalypseMac/Leetcode | 84c229eaf5a2e617ca00cabed04dd76d508d60b8 | [
"MIT"
] | null | null | null | Code/py3/archived/1-50/15.py | ApocalypseMac/Leetcode | 84c229eaf5a2e617ca00cabed04dd76d508d60b8 | [
"MIT"
class Solution:
    """LeetCode 15 (3Sum): find all unique triplets that sum to zero."""

    def threeSum(self, nums: List[int]) -> List[List[int]]:
        """Return every unique triplet [a, b, c] from *nums* with a + b + c == 0.

        Sort + two pointers, O(n^2) time. Duplicate triplets are avoided by
        skipping repeated values rather than deduplicating through a set, so
        the result matches the annotated List[List[int]] return type (the
        original returned a list of tuples). Note: sorts *nums* in place,
        as the original did.
        """
        nums.sort()
        triplets: List[List[int]] = []
        for i in range(len(nums) - 2):
            # Smallest remaining value positive => no triplet can sum to zero.
            if nums[i] > 0:
                break
            # Skip duplicate anchor values to avoid repeated triplets.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            target = -nums[i]
            lo, hi = i + 1, len(nums) - 1
            while lo < hi:
                pair = nums[lo] + nums[hi]
                if pair < target:
                    lo += 1
                elif pair > target:
                    hi -= 1
                else:
                    triplets.append([nums[i], nums[lo], nums[hi]])
                    # Step both pointers past any duplicates of this match.
                    while lo < hi and nums[lo + 1] == nums[lo]:
                        lo += 1
                    while lo < hi and nums[hi - 1] == nums[hi]:
                        hi -= 1
                    lo += 1
                    hi -= 1
        return triplets
f1b01a918a8a17a09d54b122335ce33b2323ff8d | 15,230 | py | Python | pixiedust.py | mjpieters/pixiedust | b7a58ddd29082daf9bfb7d42233df7174570a998 | [
"MIT"
] | 2 | 2018-09-29T09:27:01.000Z | 2019-12-01T15:19:31.000Z | pixiedust.py | mjpieters/pixiedust | b7a58ddd29082daf9bfb7d42233df7174570a998 | [
"MIT"
] | null | null | null | pixiedust.py | mjpieters/pixiedust | b7a58ddd29082daf9bfb7d42233df7174570a998 | [
"MIT"
] | null | null | null | # Copyright 2018 Martijn Pieters, Zopatista Ltd.
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE.txt file for details.
import collections
import itertools
import operator
import re
import sqlite3
import struct
import sys
from functools import partial
from itertools import islice, takewhile
illegal = re.compile(r"[^*+.\s]").search
tokenizer = re.compile(r"[*+.]").findall
class opcode:
    """Descriptor/decorator that registers an operator handler by token string.

    Used as ``@opcode("*+")`` above a method: at class creation time
    ``__set_name__`` records the handler in the owner's ``opcodes`` mapping,
    while normal attribute access still yields the (bound) handler.
    """

    def __init__(self, tokens, fget=None):
        # `tokens` is the opcode character sequence; `fget` is the handler
        # function, filled in when the instance is used as a decorator.
        self.tokens = tokens
        self.fget = fget

    def __set_name__(self, owner, name):
        # Class-creation hook: publish the handler in the owner's opcode table.
        owner.opcodes[self.tokens] = self.fget

    def __call__(self, fget):
        # Decorator usage: produce a fresh descriptor wrapping the handler.
        return self.__class__(self.tokens, fget)

    def __get__(self, instance, owner):
        # Unbound on the class, bound handler on an instance.
        return self if instance is None else self.fget.__get__(instance, owner)
class Opcodes(dict):
    """Operator registry (token string -> handler).

    On first access on an instance, the registered opcode functions are bound
    to that instance and the result is cached in the instance ``__dict__``.
    Opcodes are *executed* on subscription access (``__getitem__`` calls the
    handler rather than returning it).
    """
    def __init__(self, items=(), name=None, instance=None):
        # `name` is the attribute name on the owner class (set by
        # __set_name__); `instance` is non-None only for the bound copy.
        self.name = name
        self.instance = instance
        if instance is not None:
            # bind everything to instance just once
            items = ((t, o.__get__(instance)) for t, o in items)
        super().__init__(items)
    def __set_name__(self, owner, name):
        self.name = name
    def __get__(self, instance, owner):
        if instance is None:
            return self
        # cache on get, to get out of the way next time: the bound copy stored
        # in instance.__dict__ shadows this descriptor on later lookups
        bound = instance.__dict__[self.name] = Opcodes(
            self.items(), name=self.name, instance=instance
        )
        return bound
    def __getitem__(self, opcode):
        # Subscription runs the handler immediately and yields its result.
        return super().__getitem__(opcode)()
    def __missing__(self, opcode):
        """Handle intermediate opcodes.

        For opcodes like ++, the + prefix is not registered,
        but this handler creates one which consumes the next token
        and delegates to the composite token's handler.
        """
        # Potentially this could lead to handlers being generated
        # for non-existing tokens, but instruction lines are
        # not infinite, so it'll be fine.
        map = getattr(self.instance, self.name)
        def intermediate(self):
            return map[opcode + self.next_token()]
        bound = self[opcode] = intermediate.__get__(self.instance)
        return bound
class SQLiteMemory:
    """31-bit addressable memory for PixieDust programs.

    Memory cells are 4-byte words, containing signed integers. Cells live in
    fixed-size pages; only `max_active` pages are kept as Python dicts, the
    least-recently-used ones are spilled to a temporary SQLite table so a
    fully-used address space never exhausts the Python heap.
    """

    def __init__(self, page_size=2 ** 21, max_active=32):
        self._page_size = page_size
        self._max_active = max_active
        # page number -> {in-page cell address: value}, kept in LRU order.
        self._pages = collections.OrderedDict()
        # An empty filename makes sqlite back the database with a temp file.
        self._conn = sqlite3.connect("")
        self._conn.execute(
            """
            CREATE TABLE memory (
                page INT, address INT, value INT,
                PRIMARY KEY(page, address)
            )
            """
        )
        self._cursor = self._conn.cursor()

    def _get_page(self, pagenum):
        """Return the live dict for *pagenum*, faulting it in from SQLite if needed."""
        if pagenum in self._pages:
            # Touch the page so the LRU eviction order stays accurate.
            self._pages.move_to_end(pagenum)
            return self._pages[pagenum]
        with self._conn:
            rows = self._cursor.execute(
                """
                SELECT address, value FROM memory
                WHERE page = ?
                """,
                (pagenum,),
            )
            page = dict(rows)
            self._pages[pagenum] = page
            self._maybe_evict()
        return page

    def _maybe_evict(self):
        """Spill the least recently used page to SQLite when over budget."""
        if len(self._pages) <= self._max_active:
            return
        pagenum, page = self._pages.popitem(last=False)
        with self._conn:
            self._cursor.executemany(
                f"""
                INSERT OR REPLACE INTO memory (page, address, value)
                VALUES ({pagenum}, ?, ?)
                """,
                page.items(),
            )

    def __setitem__(self, address, value):
        # Addresses wrap into the 31-bit space before being split into
        # (page, in-page cell).
        pagenum, cell = divmod(address & 0x7FFFFFFF, self._page_size)
        self._get_page(pagenum)[cell] = value

    def __getitem__(self, address):
        # Unwritten cells read as 0.
        pagenum, cell = divmod(address & 0x7FFFFFFF, self._page_size)
        return self._get_page(pagenum).get(cell, 0)
def _offset_missing():
    """Placeholder callable; compile() must replace it with a real jump offset."""
    raise RuntimeError("Missing label offset identity callable")


# label offset dummy; compile() asserts this exact object is still in place
# before substituting the resolved relative offset.
_offset_placeholder = _offset_missing, 0

# mapping pixiedust characters to bits for the .* literal syntax
_dustbin_map = str.maketrans(".+", "01")

# handle casting signed integers by packing into to 8 long long bytes, then
# slicing back target size. with 8 bytes we can handle any overflow scenario.
# You can't use masking as that casts to an unsigned int instead.
# This is a reusable (callable, argcount) pipeline fragment that narrows the
# value on top of the interpreter stack to a signed 32-bit integer.
signed32bit = (
    (partial(struct.pack, "!q"), 1),
    (operator.itemgetter(slice(-4, None)), 1),
    (partial(struct.unpack, "!l"), 1),
    (operator.itemgetter(0), 1),
)
class PixieDust:
    # PixieDust interpreter. Compiles instructions to a tuple of
    # (callable, argcount) operations each. Results are pushed on
    # a stack, and callables are passed argcount top values from
    # the stack.
    opcodes = Opcodes()

    def __init__(self, stdout=sys.stdout, stdin=sys.stdin):
        """Create an interpreter with fresh registers and memory; I/O is injectable for tests."""
        # registers: two-character token names -> integer values
        self.registers = {}
        self.memory = SQLiteMemory()
        self.stdout = stdout
        self.stdin = stdin

    # program execution
    def execute(self, dust):
        """Compile *dust* source and run it until the program counter leaves the program."""
        instructions = self.compile(dust)
        self.pos = 0
        while 0 <= self.pos < len(instructions):
            # An instruction consists of (callable, argcount) entries,
            # where argcount is passed the most recent argcount of results,
            # in stack order (top-most first)
            stack = collections.deque()
            for op, count in instructions[self.pos]:
                args = (stack.pop() for _ in itertools.repeat(None, count))
                stack.append(op(*args))
            self.pos += 1

    def compile(self, dust):
        """Convert instructions to a series of (operation, argcount) sequences"""
        self.labels = {}
        self.label_jumps = {}
        compiled = []
        for i, instruction in enumerate(dust.splitlines()):
            self.pos = i
            if illegal(instruction):
                raise SyntaxError(f"Invalid characters on line {self.pos + 1}")
            # Token stream shared with the opcode handlers via self.next_token.
            self.tokens = iter(tokenizer(instruction))
            self.next_token = partial(next, self.tokens)
            try:
                compiled.append(self.opcodes[self.next_token()])
            except StopIteration:
                raise SyntaxError(
                    f"Missing instruction characters on line {self.pos + 1}"
                )
            if next(self.tokens, None) is not None:
                raise SyntaxError(f"Trailing characters on line {self.pos + 1}")
        # set jump offsets, needs to be done at the end when all label targets
        # have been processed.
        for label, positions in self.label_jumps.items():
            try:
                target = self.labels[label]
            except KeyError:
                # jump to non-existing label
                raise SyntaxError(f"Invalid label target on line {positions[0] + 1}")
            # replace offset placeholder with actual relative offset
            for pos in positions:
                assert compiled[pos][0] is _offset_placeholder
                offset_op = partial(int, target - pos), 0
                compiled[pos] = (offset_op, *compiled[pos][1:])
        return compiled

    # register handling
    def compile_register_set(self, register=None):
        """Return operations that sets the register to a value on the stack

        When *register* is None the two-token register name is consumed from
        the current token stream.
        """
        if register is None:
            register = self.next_token() + self.next_token()
        if register not in {"*.", "*+", ".*"}:
            return (
                *signed32bit,
                (partial(operator.setitem, self.registers, register), 1),
            )
        elif register == "*+": # value as Unicode char to stdout
            # mask the integer value and convert to a unicode character first.
            # this should be a Java (char) 16 bit range, not full Unicode
            return (
                (partial(operator.and_, 0xFFFF), 1),
                (chr, 1),
                (partial(self.stdout.write), 1),
            )
        elif register == "*.": # memory access
            # fetch the ** register first, then set the memory value with that result
            rget = partial(self.registers.get, "**", 0), 0
            mset = partial(operator.setitem, self.memory), 2
            return (*signed32bit, rget, mset)
        # reserved for future use
        raise SyntaxError(f"No such register: {register}, on line {self.pos + 1}")

    def compile_register_get(self, register=None, _b=_dustbin_map):
        """Return operations that produce the register value"""
        if register is None:
            register = self.next_token() + self.next_token()
        if register not in {"*.", "*+", ".*"}:
            return ((partial(self.registers.get, register, 0), 0),)
        elif register == "*+": # read a unicode character from stdin
            # convert the character read to a 16-bit signed integer
            return (
                (partial(self.stdin.read, 1), 0),
                (ord, 1),
                (partial(operator.and_, 0xFFFF), 1),
            )
        elif register == "*.": # memory access
            # fetch the ** register first, then fetch the memory value with that result
            rget = partial(self.registers.get, "**", 0), 0
            mget = partial(operator.getitem, self.memory), 1
            return rget, mget
        elif register == ".*": # literal value
            # consume the literal tokens (at most 32 bits plus the * terminator)
            bits = "".join(takewhile(lambda t: t != "*", islice(self.tokens, 33)))
            if len(bits) >= 33:
                # too many bits
                raise SyntaxError(f"Invalid number literal on line {self.pos + 1}")
            # a 32nd leading '+' bit marks a negative (two's-complement) value
            neg = len(bits) > 31 and bits[0] == "+"
            value = int(bits[-31:].translate(_b) or "0", 2) - (0x80000000 if neg else 0)
            return ((partial(int, value), 0),)

    # opcode implementation
    # * O R X Y is a mathematical operation
    #
    # O specifies the operation to use: ...
    # R specifies the register to store the result to.
    # X and Y are expressions.
    @opcode("*.")
    def op_math_copy(self):
        """* O: . for copy
        For a copy operation, Y should be omitted.
        """
        register_set = self.compile_register_set()
        x_get = self.compile_register_get()
        return (*x_get, *register_set)

    @opcode("*+")
    def op_math_add_sub(self, _o={"+": operator.add, ".": operator.sub}): # noqa B006
        """* O: ++ for addition, +. for subtraction"""
        try:
            oper = _o[self.next_token()], 2
        except KeyError as e:
            # *+* is reserved for future use.
            raise SyntaxError(
                f"No such math operator: *+{e.args[0]}, on line {self.pos + 1}"
            )
        register_set = self.compile_register_set()
        x_get = self.compile_register_get()
        y_get = self.compile_register_get()
        # y is evaluated first so x ends up on top of the stack for the operator
        return (*y_get, *x_get, oper, *register_set)

    @opcode("**")
    def op_math_mul_div_mod(
        self,
        _o={"*": operator.mul, ".": operator.floordiv, "+": operator.mod}, # noqa B006
    ):
        """* O: ** for multiplication, *. for division, *+ for modulo"""
        oper = _o[self.next_token()], 2
        register_set = self.compile_register_set()
        x_get = self.compile_register_get()
        y_get = self.compile_register_get()
        # put y on the stack first
        return (*y_get, *x_get, oper, *register_set)

    @opcode(".")
    def op_comp(
        self, _c={"*": operator.eq, "+": operator.lt, ".": operator.gt} # noqa B006
    ):
        """. C X Y performs the comparison specified by C
        ... and stores it with 0/1 in the .. register
        =<> are indicated by *+., respectively. X and Y are expressions.
        """
        # int() converts the boolean comparison result to 0/1 before storing
        comp = (_c[self.next_token()], 2), (int, 1)
        x_get = self.compile_register_get()
        y_get = self.compile_register_get()
        register_set = self.compile_register_set("..")
        return (*y_get, *x_get, *comp, *register_set)

    @opcode("++")
    def op_print(self):
        """++ X prints the Unicode character represented by expression X to STDOUT."""
        x_get = self.compile_register_get()
        # mask to Java char range
        print_ops = self.compile_register_set("*+")
        return (*x_get, *print_ops)

    @opcode("+.")
    def op_set_label(self):
        """+. L defines a program label; L can be any number of characters."""
        label = "".join(self.tokens)
        if label in self.labels:
            raise SyntaxError(
                f"Re-definition of label {label!r} on line {self.pos + 1}"
            )
        self.labels[label] = self.pos
        return () # return noop to preserve instruction positions

    @opcode("+*")
    def op_jump_label(self, _t={"*": operator.truth, ".": operator.not_}): # noqa B006
        """+* T L jumps to label L based on the condition T.
        T can be
        * to jump if .. is not 0,
        . to jump if .. is 0, or
        + to jump regardless of the value in ...
        """
        try:
            test_op = _t[self.next_token()], 1
        except KeyError:
            # jump unconditional, no test and adjustment needed
            test_ops = ()
        else:
            register_get = self.compile_register_get("..")
            # Take the test output (True or False) and multiply this with the offset
            # The result is either the offset, or 0
            adjust_offset_ops = operator.mul, 2
            test_ops = (*register_get, test_op, adjust_offset_ops)
        # register the target for the compiler to later on insert
        # an offset into the operations
        label = "".join(self.tokens)
        self.label_jumps.setdefault(label, []).append(self.pos)
        # add the (updated) offset to self.pos
        update_pos_op = (
            (partial(getattr, self, "pos"), 0),
            (operator.add, 2),
            (partial(setattr, self, "pos"), 1),
        )
        return (_offset_placeholder, *test_ops, *update_pos_op)
# Script entry point: run the PixieDust program in the file named on the
# command line.
if __name__ == "__main__":
    duster = PixieDust()
    with open(sys.argv[1], "r") as instructions:
        duster.execute(instructions.read())
| 35.835294 | 88 | 0.582272 |
61dccf92429f207a3ebf63d60148170087b3e979 | 1,220 | py | Python | tests/query_runner/test_utils.py | jodevsa/redash | 021068688db82e3a7092b4bb202e37c652bd6f64 | [
"BSD-2-Clause"
] | 3 | 2019-06-16T14:46:05.000Z | 2021-11-09T11:27:18.000Z | tests/query_runner/test_utils.py | jodevsa/redash | 021068688db82e3a7092b4bb202e37c652bd6f64 | [
"BSD-2-Clause"
] | 187 | 2019-08-14T02:55:59.000Z | 2022-03-22T17:55:17.000Z | tests/query_runner/test_utils.py | jodevsa/redash | 021068688db82e3a7092b4bb202e37c652bd6f64 | [
"BSD-2-Clause"
] | 4 | 2019-07-01T06:15:44.000Z | 2021-12-11T11:17:08.000Z | # -*- coding: utf-8 -*-
from unittest import TestCase
from redash.query_runner import TYPE_DATETIME, TYPE_FLOAT, TYPE_INTEGER, TYPE_BOOLEAN, TYPE_STRING
from redash.query_runner.drill import guess_type
class TestGuessType(TestCase):
    """Exercise redash.query_runner.drill.guess_type over each supported type."""

    def test_handles_unicode(self):
        self.assertEqual(guess_type(u'Текст'), TYPE_STRING)

    def test_detects_booleans(self):
        for literal in ('true', 'True', 'TRUE', 'false', 'False', 'FALSE'):
            self.assertEqual(guess_type(literal), TYPE_BOOLEAN)

    def test_detects_strings(self):
        for value in (None, '', 'redash'):
            self.assertEqual(guess_type(value), TYPE_STRING)

    def test_detects_integer(self):
        self.assertEqual(guess_type('42'), TYPE_INTEGER)

    def test_detects_float(self):
        self.assertEqual(guess_type('3.14'), TYPE_FLOAT)

    def test_detects_date(self):
        self.assertEqual(guess_type('2018-10-31'), TYPE_DATETIME)
a56c72b12bf4c3e652d1b15e5b9823230bc331b3 | 40 | py | Python | facetools/test/common.py | bigsassy/django-facetools | aeedaea81ab0007ee8e96b2f81f1404dc8bddb3c | [
"MIT"
] | 2 | 2018-01-24T20:41:27.000Z | 2019-06-27T13:24:18.000Z | facetools/test/common.py | bigsassy/django-facetools | aeedaea81ab0007ee8e96b2f81f1404dc8bddb3c | [
"MIT"
] | null | null | null | facetools/test/common.py | bigsassy/django-facetools | aeedaea81ab0007ee8e96b2f81f1404dc8bddb3c | [
"MIT"
] | null | null | null | class TestUserNotLoaded(Exception): pass | 40 | 40 | 0.875 |
7ce85d0064e5fd929b5380b2ff8a8318564770ab | 4,159 | py | Python | 01-Lesson-Plans/15-Algorithmic-Trading/2/Activities/06-Evr_Async_Trading/Solved/jarvis-text.py | tatianegercina/FinTech | b40687aa362d78674e223eb15ecf14bc59f90b62 | [
"ADSL"
] | 1 | 2021-04-13T07:14:34.000Z | 2021-04-13T07:14:34.000Z | 01-Lesson-Plans/15-Algorithmic-Trading/2/Activities/06-Evr_Async_Trading/Solved/jarvis-text.py | tatianegercina/FinTech | b40687aa362d78674e223eb15ecf14bc59f90b62 | [
"ADSL"
] | 2 | 2021-06-02T03:14:19.000Z | 2022-02-11T23:21:24.000Z | 01-Lesson-Plans/15-Algorithmic-Trading/2/Activities/06-Evr_Async_Trading/Solved/jarvis-text.py | tatianegercina/FinTech | b40687aa362d78674e223eb15ecf14bc59f90b62 | [
"ADSL"
] | 1 | 2021-05-07T13:26:50.000Z | 2021-05-07T13:26:50.000Z | import os
import ccxt
import asyncio
import numpy as np
import pandas as pd
from dotenv import load_dotenv
import matplotlib.pyplot as plt
def initialize(cash=None):
    """Set up the trading account and seed the price DataFrame.

    Returns an (account, df) pair: *account* is a dict holding the cash
    balance and share count, *df* holds the first fetched price row.
    """
    print("Initializing Account and DataFrame")
    account = {"balance": cash, "shares": 0}
    df = fetch_data()
    # Plot bootstrapping is currently disabled:
    # build_plot(df)
    return account, df
def build_plot(df):
    """Draw the initial BTC/USD price chart for *df*."""
    print("Initializing plot")
    df.plot(title="Current BTC/USD Price")
    return
# @TODO: Create a function to update the plot!
def update_plot(df):
    """Redraw the BTC/USD price chart with the latest data in *df*."""
    df.plot(title="Current BTC/USD Price")
    return
def fetch_data():
    """Fetch the latest BTC/USD close from Kraken as a one-row DataFrame.

    The row is indexed by the quote's own timestamp. Kraken API credentials
    are read from the KRAKEN_PUBLIC_KEY / KRAKEN_SECRET_KEY environment
    variables (loaded via dotenv).
    """
    print("Fetching data...")
    load_dotenv()
    kraken_public_key = os.getenv("KRAKEN_PUBLIC_KEY")
    kraken_secret_key = os.getenv("KRAKEN_SECRET_KEY")
    kraken = ccxt.kraken({"apiKey": kraken_public_key, "secret": kraken_secret_key})
    # BUG FIX: fetch the ticker once. The original called fetch_ticker twice,
    # doubling API traffic and risking a close/datetime pair taken from two
    # different quotes.
    ticker = kraken.fetch_ticker("BTC/USD")
    df = pd.DataFrame({"close": [ticker["close"]]})
    df.index = pd.to_datetime([ticker["datetime"]])
    return df
def generate_signals(df):
    """Generate SMA-crossover trading signals for a close-price dataset.

    Returns a copy of *df* with columns: sma10/sma20 (rolling means),
    signal (1.0 while sma10 > sma20, starting at row 10, else 0.0) and
    entry/exit (signal diff: 1.0 = buy point, -1.0 = sell point).
    """
    print("-----> Generating trading signals <-----")
    short_window = 10
    long_window = 20
    signals = df.copy()
    signals["signal"] = 0.0
    # Generate the short and long moving averages
    signals["sma10"] = signals["close"].rolling(window=short_window).mean()
    signals["sma20"] = signals["close"].rolling(window=long_window).mean()
    # BUG FIX: assign through .loc on the frame itself. The original chained
    # `signals["signal"][short_window:] = ...` writes through a temporary,
    # triggers pandas' SettingWithCopyWarning, and silently does nothing
    # under copy-on-write semantics.
    signals.loc[signals.index[short_window:], "signal"] = np.where(
        signals["sma10"][short_window:] > signals["sma20"][short_window:], 1.0, 0.0
    )
    # Calculate the points in time at which a position should be taken, 1 or -1
    signals["entry/exit"] = signals["signal"].diff()
    print("-----> Trading signals generated <-----")
    return signals
def execute_trade_strategy(signals, account):
    """Act on the latest entry/exit signal: buy on 1.0, sell on -1.0, else hold.

    Mutates and returns *account* (a dict with "balance" and "shares").
    """
    print("**Executing Trading Strategy**")
    latest_signal = signals["entry/exit"].iloc[-1]
    latest_close = signals["close"].iloc[-1]
    if latest_signal == 1.0:
        print("Buy")
        # Trade 0.001 of the whole-share count the current balance affords.
        purchase_size = round(account["balance"] / latest_close, 0) * 0.001
        account["balance"] -= purchase_size * latest_close
        account["shares"] += purchase_size
    elif latest_signal == -1.0:
        print("Sell")
        # Liquidate the entire position at the latest close.
        account["balance"] += latest_close * account["shares"]
        account["shares"] = 0
    else:
        print("Hold")
    print(f"Account balance: ${account['balance']}")
    print(f"Account shares : {account['shares']}")
    print("**Trading Strategy Executed**")
    return account
# @TODO: Set the initial configurations and update the main loop to use asyncio
# Set the initial account configuration
# Script setup: seed the account with $10,000 starting cash and fetch the
# first price sample.
account, df = initialize(10000)
# Turns on the interactive mode of matplotlib (https://matplotlib.org/api/_as_gen/matplotlib.pyplot.ion.html)
plt.ion()
# Show the initial line chart
plt.show()
async def main():
    """Poll prices forever: fetch, regenerate signals, trade, refresh the chart.

    Mutates the module-level ``account`` and ``df`` in place on every
    iteration. fetch_data is blocking, so it runs in the default executor.
    """
    loop = asyncio.get_event_loop()
    while True:
        global account
        global df
        # Fetch new prices data
        new_df = await loop.run_in_executor(None, fetch_data)
        # NOTE(review): DataFrame.append was removed in pandas 2.0; pd.concat
        # is the replacement.
        df = df.append(new_df, ignore_index=True)
        # Execute the trading strategy
        # 22 rows gives generate_signals its 20-sample long window plus slack.
        min_window = 22
        if df.shape[0] >= min_window:
            signals = generate_signals(df)
            account = execute_trade_strategy(signals, account)
        # Update the plot
        # update_plot(df)
        # Update line chart
        plt.pause(1)
        # Refresh the matplotlib plotting area to avoid extra memory consumption
        plt.close()
        await asyncio.sleep(1)


# Python 3.7+
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| 27.183007 | 109 | 0.642222 |
c81e07a734798d379dfd082b4cc75a3d06330183 | 7,214 | py | Python | modified_gym/envs/registration.py | mk37972/SCAPE | 01080e4159917546c76dd15ae5c74e092f4ae299 | [
"MIT"
] | null | null | null | modified_gym/envs/registration.py | mk37972/SCAPE | 01080e4159917546c76dd15ae5c74e092f4ae299 | [
"MIT"
] | null | null | null | modified_gym/envs/registration.py | mk37972/SCAPE | 01080e4159917546c76dd15ae5c74e092f4ae299 | [
"MIT"
] | null | null | null | import re
import importlib
import warnings
from modified_gym import error, logger
# This format is true today, but it's *not* an official spec.
# [username/](env-name)-v(version) env-name is group 1, version is group 2
#
# 2016-10-31: We're experimentally expanding the environment ID format
# to include an optional username.
env_id_re = re.compile(r'^(?:[\w:-]+\/)?([\w:.-]+)-v(\d+)$')
def load(name):
    """Resolve a "module.path:attr" entry-point string to the named attribute.

    Imports the module part and returns ``getattr(module, attr)``.
    """
    mod_name, attr_name = name.split(":")
    module = importlib.import_module(mod_name)
    return getattr(module, attr_name)
class EnvSpec(object):
    """A specification for a particular instance of the environment. Used
    to register the parameters for official evaluations.

    Args:
        id (str): The official environment ID
        entry_point (Optional[str]): The Python entrypoint of the environment class (e.g. module.name:Class)
        reward_threshold (Optional[int]): The reward threshold before the task is considered solved
        kwargs (dict): The kwargs to pass to the environment class
        nondeterministic (bool): Whether this environment is non-deterministic even after seeding
        tags (dict[str:any]): A set of arbitrary key-value tags on this environment, including simple property=True tags
        max_episode_steps (Optional[int]): The maximum number of steps that an episode can consist of

    Attributes:
        id (str): The official environment ID
    """

    def __init__(self, id, entry_point=None, reward_threshold=None, kwargs=None, nondeterministic=False, tags=None, max_episode_steps=None):
        self.id = id
        # Evaluation parameters
        self.reward_threshold = reward_threshold
        # Environment properties
        self.nondeterministic = nondeterministic
        self.entry_point = entry_point
        if tags is None:
            tags = {}
        self.tags = tags
        # Mirror the step limit into the tags dict for legacy consumers.
        tags['wrapper_config.TimeLimit.max_episode_steps'] = max_episode_steps
        self.max_episode_steps = max_episode_steps
        # Validate the ID eagerly so registration fails fast on malformed names.
        match = env_id_re.search(id)
        if not match:
            raise error.Error('Attempted to register malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id, env_id_re.pattern))
        self._env_name = match.group(1)
        self._kwargs = {} if kwargs is None else kwargs

    def make(self, **kwargs):
        """Instantiates an instance of the environment with appropriate kwargs.

        Call-time kwargs override the kwargs registered on the spec.
        (BUG FIX: removed a stray debug ``print(self.entry_point)`` that wrote
        the entry point to stdout on every make() call.)
        """
        if self.entry_point is None:
            raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))
        _kwargs = self._kwargs.copy()
        _kwargs.update(kwargs)
        if callable(self.entry_point):
            env = self.entry_point(**_kwargs)
        else:
            cls = load(self.entry_point)
            env = cls(**_kwargs)
        # Make the enviroment aware of which spec it came from.
        env.unwrapped.spec = self
        return env

    def __repr__(self):
        return "EnvSpec({})".format(self.id)
class EnvRegistry(object):
    """Register an env by ID. IDs remain stable over time and are
    guaranteed to resolve to the same environment dynamics (or be
    desupported). The goal is that results on a particular environment
    should always be comparable, and not depend on the version of the
    code that was running.
    """
    def __init__(self):
        # Maps full env id string -> EnvSpec.
        self.env_specs = {}
    def make(self, path, **kwargs):
        """Instantiate the env registered under *path*, applying the
        underscore-method compatibility patch and a TimeLimit wrapper when
        the spec declares a step limit."""
        if len(kwargs) > 0:
            logger.info('Making new env: %s (%s)', path, kwargs)
        else:
            logger.info('Making new env: %s', path)
        spec = self.spec(path)
        env = spec.make(**kwargs)
        # We used to have people override _reset/_step rather than
        # reset/step. Set _gym_disable_underscore_compat = True on
        # your environment if you use these methods and don't want
        # compatibility code to be invoked.
        if hasattr(env, "_reset") and hasattr(env, "_step") and not getattr(env, "_gym_disable_underscore_compat", False):
            patch_deprecated_methods(env)
        if (env.spec.max_episode_steps is not None) and not spec.tags.get('vnc'):
            from modified_gym.wrappers.time_limit import TimeLimit
            env = TimeLimit(env, max_episode_steps=env.spec.max_episode_steps)
        return env
    def all(self):
        """Return all registered EnvSpecs."""
        return self.env_specs.values()
    def spec(self, path):
        """Look up the EnvSpec for *path*, importing any ``module:`` prefix first.

        Raises error.Error for malformed ids or failed imports,
        error.DeprecatedEnv when only other versions of the env exist, and
        error.UnregisteredEnv when nothing matches.
        """
        if ':' in path:
            mod_name, _sep, id = path.partition(':')
            try:
                importlib.import_module(mod_name)
            # catch ImportError for python2.7 compatibility
            except ImportError:
                raise error.Error('A module ({}) was specified for the environment but was not found, make sure the package is installed with `pip install` before calling `gym.make()`'.format(mod_name))
        else:
            id = path
        match = env_id_re.search(id)
        if not match:
            raise error.Error('Attempted to look up malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id.encode('utf-8'), env_id_re.pattern))
        try:
            return self.env_specs[id]
        except KeyError:
            # Parse the env name and check to see if it matches the non-version
            # part of a valid env (could also check the exact number here)
            env_name = match.group(1)
            matching_envs = [valid_env_name for valid_env_name, valid_env_spec in self.env_specs.items()
                             if env_name == valid_env_spec._env_name]
            if matching_envs:
                raise error.DeprecatedEnv('Env {} not found (valid versions include {})'.format(id, matching_envs))
            else:
                raise error.UnregisteredEnv('No registered env with id: {}'.format(id))
    def register(self, id, **kwargs):
        """Register a new EnvSpec under *id*; re-registration is an error."""
        if id in self.env_specs:
            raise error.Error('Cannot re-register id: {}'.format(id))
        self.env_specs[id] = EnvSpec(id, **kwargs)
# Have a global registry
registry = EnvRegistry()


def register(id, **kwargs):
    """Register an environment spec with the module-global registry."""
    return registry.register(id, **kwargs)


def make(id, **kwargs):
    """Instantiate an environment from the module-global registry."""
    return registry.make(id, **kwargs)


def spec(id):
    """Look up an EnvSpec in the module-global registry."""
    return registry.spec(id)
# Emit the deprecation warning only once per process.
warn_once = True


def patch_deprecated_methods(env):
    """
    Methods renamed from '_method' to 'method', render() no longer has 'close' parameter, close is a separate method.
    For backward compatibility, this makes it possible to work with unmodified environments.
    """
    global warn_once
    if warn_once:
        logger.warn("Environment '%s' has deprecated methods '_step' and '_reset' rather than 'step' and 'reset'. Compatibility code invoked. Set _gym_disable_underscore_compat = True to disable this behavior." % str(type(env)))
        warn_once = False
    env.reset = env._reset
    env.step = env._step
    env.seed = env._seed
    # Adapt the old _render(mode, close=...) signature to the new API, where
    # close is a separate method.
    def render(mode):
        return env._render(mode, close=False)
    def close():
        env._render("human", close=True)
    env.render = render
    env.close = close
| 39.637363 | 228 | 0.650125 |
8db68e709e65e27f3bd2cce05fc02bc6d1007d41 | 7,553 | py | Python | views.py | alvienzo720/Dep_Nadine | b23688aa87ba3cfe138f9b243eed3f50a74e1486 | [
"Apache-2.0"
] | null | null | null | views.py | alvienzo720/Dep_Nadine | b23688aa87ba3cfe138f9b243eed3f50a74e1486 | [
"Apache-2.0"
] | null | null | null | views.py | alvienzo720/Dep_Nadine | b23688aa87ba3cfe138f9b243eed3f50a74e1486 | [
"Apache-2.0"
] | null | null | null | import datetime
import calendar
import pprint
import traceback
import logging
from django.conf import settings
from django.contrib import messages
from django.db.models import Q
from django.http import HttpResponse, Http404, HttpResponseServerError, HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib import auth, messages
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from django.utils.html import strip_tags
import django.contrib.contenttypes.models as content_type_models
from django.template import RequestContext
from django.core.cache import cache
from django.core.mail import send_mail
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.utils import feedgenerator, timezone
from django.urls import reverse
from django.contrib.auth.tokens import default_token_generator
from django.contrib.auth.forms import PasswordResetForm
from django.views.decorators.csrf import csrf_protect
from nadine.models.profile import EmailAddress
from nadine import email
logger = logging.getLogger(__name__)
@login_required
def index(request):
    """Send staff to the staff dashboard and everyone else to the member home."""
    destination = 'staff:home' if request.user.is_staff else 'member:home'
    return HttpResponseRedirect(reverse(destination))
@csrf_protect
def password_reset(request, is_admin_site=False, template_name='registration/password_reset_form.html', email_template_name='registration/password_reset_email.html', password_reset_form=PasswordResetForm, token_generator=default_token_generator, post_reset_redirect=None):
    """Render and process the password-reset form.

    GET with an ?email= query prefills the form; POST looks the address up in
    EmailAddress and, when found, sends the reset email and redirects to
    *post_reset_redirect* (defaults to the 'password_reset_done' view).
    """
    if post_reset_redirect is None:
        post_reset_redirect = reverse('password_reset_done')
    if request.method == 'GET' and request.GET.get('email', None):
        # Prefill the form from the query string.
        form = password_reset_form(initial={'email': request.GET.get('email')})
    elif request.method == "POST":
        email = request.POST.get('email')
        valid = EmailAddress.objects.filter(email=email)
        # NOTE(review): revealing whether the address exists allows account
        # enumeration; Django's stock password_reset deliberately does not.
        if len(valid) > 0:
            logger.info("Resetting password for '%s'" % email)
            form = password_reset_form(request.POST)
            if form.is_valid():
                opts = {}
                opts['use_https'] = request.is_secure()
                opts['token_generator'] = token_generator
                if is_admin_site:
                    opts['domain_override'] = request.META['HTTP_HOST']
                else:
                    opts['email_template_name'] = email_template_name
                    # NOTE(review): RequestSite is not imported in this module;
                    # this branch would raise NameError if ever reached.
                    if not Site._meta.installed:
                        opts['domain_override'] = RequestSite(request).domain
                form.save(**opts)
                return HttpResponseRedirect(post_reset_redirect)
        else:
            # NOTE(review): stray debug print; logger.warning would be cleaner.
            print('There is no user associated with that email. Please try again.')
            messages.error(request, 'There is no user associated with that email.')
            return render(request, template_name, {'form': password_reset_form()})
    else:
        form = password_reset_form()
    # Falls through here for plain GET and for an invalid POSTed form.
    return render(request, template_name, {'form': form})
@login_required
def email_manage(request, email_pk, action):
    """Set the requested email address as the primary, or delete it.

    Can only be requested by the owner of the email address (or staff).
    """
    email_address = get_object_or_404(EmailAddress, pk=email_pk)
    if not email_address.user == request.user and not request.user.is_staff:
        # BUG FIX: previously the error was flashed but the requested action
        # was still performed.  Bail out before touching the address.
        messages.error(request, "You are not authorized to manage this email address")
        return _email_manage_redirect(request, email_address)
    # if not email_address.is_verified():
    #     messages.error(request, "Email '%s' needs to be verified first." % email_address.email)
    if action == "set_primary":
        email_address.set_primary()
        messages.success(request, "'%s' is now marked as your primary email address." % email_address.email)
    elif action == "delete":
        email_address.delete()
        messages.success(request, "'%s' has been removed." % email_address.email)
    return _email_manage_redirect(request, email_address)


def _email_manage_redirect(request, email_address):
    """Send the user back where they came from, or to the owner's profile."""
    if 'HTTP_REFERER' in request.META:
        return redirect(request.META['HTTP_REFERER'])
    return redirect(reverse('member:profile:view', kwargs={'username': email_address.user.username}))
@login_required
def email_add(request):
    """Attach a new (unverified) email address to the posted user.

    Expects POST fields ``username`` and ``email``; sends a verification
    email for the new address.
    """
    user = get_object_or_404(User, username=request.POST.get("username"))
    # Distinct local name so we do not shadow the module-level ``email`` import.
    new_address = request.POST.get("email")
    if new_address:
        e = EmailAddress(user=user, email=new_address.lower())
        e.save(verify=True)
    if 'HTTP_REFERER' in request.META:
        return redirect(request.META['HTTP_REFERER'])
    else:
        # BUG FIX: this branch referenced the undefined name ``email_address``
        # and raised NameError; redirect to the target user's profile instead.
        return redirect(reverse('member:profile:view', kwargs={'username': user.username}))
@login_required
def email_delete(request, email_pk):
    """Delete the given email address; it must belong to the current user.

    Unverified addresses are always removable.  The last *verified* address
    is only removable when site policy
    (MM.ALLOW_REMOVE_LAST_VERIFIED_EMAIL) permits it.
    """
    addr = get_object_or_404(EmailAddress, pk=int(email_pk))
    if addr.user != request.user:
        messages.error(request, 'Invalid request.')
    elif not addr.is_verified():
        addr.delete()
    else:
        verified = request.user.emailaddress_set.filter(
            verified_at__isnull=False)
        verified_count = len(verified)
        if verified_count > 1:
            addr.delete()
        elif verified_count == 1:
            if MM.ALLOW_REMOVE_LAST_VERIFIED_EMAIL:
                addr.delete()
            else:
                messages.error(request,
                               MM.REMOVE_LAST_VERIFIED_EMAIL_ATTEMPT_MSG,
                               extra_tags='alert-error')
    return redirect(MM.DELETE_EMAIL_REDIRECT)
@csrf_protect
def email_verify(request, email_pk):
    """Verify an email address, either by mailing a link or by checking a key.

    ``?send_link`` mails the verification link to the address;
    ``?verif_key=...`` attempts the actual verification and, on success,
    stamps the address and redirects to the owner's profile.
    """
    email_address = get_object_or_404(EmailAddress, pk=email_pk)
    if email_address.is_verified():
        messages.error(request, "Email address was already verified.")
    if not email_address.user == request.user and not request.user.is_staff:
        # BUG FIX: previously the error was flashed but verification was
        # still attempted for the unauthorized user.  Stop here.
        messages.error(request, "You are not authorized to verify this email address")
        return render(request, "email_verify.html", {'email':email_address.email})
    # Send the verification link if that was requested
    if 'send_link' in request.GET:
        email.send_verification(email_address)
    verif_key = request.GET.get('verif_key', "").strip()
    if len(verif_key) != 0:
        if email_address.verif_key == verif_key:
            # Looks good!  Mark as verified and record where the click came from.
            email_address.remote_addr = request.META.get('REMOTE_ADDR')
            email_address.remote_host = request.META.get('REMOTE_HOST')
            # NOTE(review): this sets ``verified_ts`` while email_delete
            # filters on ``verified_at`` -- confirm which field the
            # EmailAddress model actually uses.
            email_address.verified_ts = timezone.now()
            email_address.save()
            messages.success(request, "Email address has been verified.")
            return HttpResponseRedirect(reverse('member:profile:view', kwargs={'username': email_address.user.username}))
        else:
            messages.error(request, "Invalid Key")
    return render(request, "email_verify.html", {'email':email_address.email})
# Copyright 2020 Office Nomads LLC (https://officenomads.com/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://opensource.org/licenses/Apache-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| 46.337423 | 580 | 0.703561 |
7799e5abeb49b2cff1a184dfcdd36abef6e3de8e | 2,451 | py | Python | GamblersRuin.py | tobikuhlmann/monte-carlo-playground | 796f6d23f7677f5bc525ea603b1aa8a48cfd1c3b | [
"MIT"
] | null | null | null | GamblersRuin.py | tobikuhlmann/monte-carlo-playground | 796f6d23f7677f5bc525ea603b1aa8a48cfd1c3b | [
"MIT"
] | null | null | null | GamblersRuin.py | tobikuhlmann/monte-carlo-playground | 796f6d23f7677f5bc525ea603b1aa8a48cfd1c3b | [
"MIT"
] | null | null | null | import numpy as np
class GamblersRuin(object):
    """Monte-Carlo model of the classic gambler's ruin problem.

    Each round tosses three coins (each +1 with probability ``p``, else -1);
    the net pay-off is added to the balance.  A gamble stops when the
    configured number of rounds is reached or the balance would go negative.

    p: probability that the gambler wins each individual coin toss.
    init_bal: gambler's initial amount of money / reserves.
    """

    def __init__(self, p, init_bal):
        self.p = p
        self.init_bal = init_bal
        self.bal = init_bal           # current balance of the running game
        self.q = 1 - self.p           # probability of losing a toss
        self.realizations = np.array(self.init_bal)
        self.simulation_results = []

    def coin_toss(self):
        """One coin flip with payoff +1 (probability p) or -1 (probability q)."""
        return 1 if np.random.uniform(0, 1) < self.p else -1

    def play_one_round(self):
        """Three coin tosses; returns the net pay-off in {-3, -1, 1, 3}."""
        return sum(self.coin_toss() for _ in range(3))

    def gamble(self, no_rounds):
        """Play rounds until ruin or ``no_rounds`` is reached.

        Resets the balance and records the balance path in
        ``self.realizations``.
        """
        self.realizations = np.array(self.init_bal)
        self.bal = self.init_bal
        round_no = 1                  # renamed: ``round`` shadowed the builtin
        while round_no < no_rounds:
            round_result = self.play_one_round()
            if (self.bal + round_result) >= 0:
                self.bal += round_result
            else:
                break                 # ruin: the loss cannot be covered
            self.realizations = np.append(self.realizations, self.bal)
            round_no += 1

    def simulate(self, no_simulations, no_rounds):
        """Gamble ``no_simulations`` times and store each realization path."""
        self.simulation_results = []
        for _ in range(no_simulations):
            self.gamble(no_rounds=no_rounds)
            self.simulation_results.append(self.realizations)

    def probability_ruin(self):
        """Analytical probability of eventual ruin with unlimited play.

        For p > 1/2 the classical result is (q/p) ** init_bal; for p <= 1/2
        ruin is certain.  BUG FIX: the previous code returned
        1 - (q/p) ** init_bal, which is the *survival* probability (e.g. it
        reported certain ruin for p == 1).  The dead local ``no_ruin`` was
        also removed.
        """
        if self.p > 0.5:
            return (self.q / self.p) ** self.init_bal
        return 1
if __name__ == "__main__":
    # Simulation parameters.
    win_probability = 0.5     # probability of success on each toss
    starting_balance = 10     # gambler's initial reserves
    rounds_per_game = 100     # rounds played per gamble
    games = 100               # number of simulated gambles

    gr = GamblersRuin(p=float(win_probability), init_bal=int(starting_balance))
    gr.simulate(no_simulations=games, no_rounds=rounds_per_game)
    print(gr.simulation_results)
cd9477d3e2d4e3d11199f2b7bb3c9c5629a47b27 | 6,214 | py | Python | logicmonitor_sdk/models/netflow_bandwidth.py | JeremyTangCD/lm-sdk-python | 2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983 | [
"Apache-2.0"
] | null | null | null | logicmonitor_sdk/models/netflow_bandwidth.py | JeremyTangCD/lm-sdk-python | 2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983 | [
"Apache-2.0"
] | null | null | null | logicmonitor_sdk/models/netflow_bandwidth.py | JeremyTangCD/lm-sdk-python | 2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from logicmonitor_sdk.models.netflow_data_base import NetflowDataBase # noqa: F401,E501
# Swagger-generated API model: regenerate from the spec rather than hand-edit.
class NetflowBandwidth(NetflowDataBase):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared swagger type.
    swagger_types = {
        'data_type': 'str',
        'receive': 'float',
        'usage': 'float',
        'send': 'float',
        'device_display_name': 'str'
    }
    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        'data_type': 'dataType',
        'receive': 'receive',
        'usage': 'usage',
        'send': 'send',
        'device_display_name': 'deviceDisplayName'
    }
    def __init__(self, data_type=None, receive=None, usage=None, send=None, device_display_name=None):  # noqa: E501
        """NetflowBandwidth - a model defined in Swagger"""  # noqa: E501
        self._data_type = None
        self._receive = None
        self._usage = None
        self._send = None
        self._device_display_name = None
        self.discriminator = None
        # Route provided values through the property setters.
        if data_type is not None:
            self.data_type = data_type
        if receive is not None:
            self.receive = receive
        if usage is not None:
            self.usage = usage
        if send is not None:
            self.send = send
        if device_display_name is not None:
            self.device_display_name = device_display_name
    @property
    def data_type(self):
        """Gets the data_type of this NetflowBandwidth.  # noqa: E501
        :return: The data_type of this NetflowBandwidth.  # noqa: E501
        :rtype: str
        """
        return self._data_type
    @data_type.setter
    def data_type(self, data_type):
        """Sets the data_type of this NetflowBandwidth.
        :param data_type: The data_type of this NetflowBandwidth.  # noqa: E501
        :type: str
        """
        self._data_type = data_type
    @property
    def receive(self):
        """Gets the receive of this NetflowBandwidth.  # noqa: E501
        :return: The receive of this NetflowBandwidth.  # noqa: E501
        :rtype: float
        """
        return self._receive
    @receive.setter
    def receive(self, receive):
        """Sets the receive of this NetflowBandwidth.
        :param receive: The receive of this NetflowBandwidth.  # noqa: E501
        :type: float
        """
        self._receive = receive
    @property
    def usage(self):
        """Gets the usage of this NetflowBandwidth.  # noqa: E501
        :return: The usage of this NetflowBandwidth.  # noqa: E501
        :rtype: float
        """
        return self._usage
    @usage.setter
    def usage(self, usage):
        """Sets the usage of this NetflowBandwidth.
        :param usage: The usage of this NetflowBandwidth.  # noqa: E501
        :type: float
        """
        self._usage = usage
    @property
    def send(self):
        """Gets the send of this NetflowBandwidth.  # noqa: E501
        :return: The send of this NetflowBandwidth.  # noqa: E501
        :rtype: float
        """
        return self._send
    @send.setter
    def send(self, send):
        """Sets the send of this NetflowBandwidth.
        :param send: The send of this NetflowBandwidth.  # noqa: E501
        :type: float
        """
        self._send = send
    @property
    def device_display_name(self):
        """Gets the device_display_name of this NetflowBandwidth.  # noqa: E501
        :return: The device_display_name of this NetflowBandwidth.  # noqa: E501
        :rtype: str
        """
        return self._device_display_name
    @device_display_name.setter
    def device_display_name(self, device_display_name):
        """Sets the device_display_name of this NetflowBandwidth.
        :param device_display_name: The device_display_name of this NetflowBandwidth.  # noqa: E501
        :type: str
        """
        self._device_display_name = device_display_name
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated safety net for dict-derived models; a no-op here.
        if issubclass(NetflowBandwidth, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Attribute-wise equality via the instance __dict__.
        if not isinstance(other, NetflowBandwidth):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.990991 | 304 | 0.592855 |
bc74cba949bab11cfaf7f9a4c878729a83ea14a9 | 1,121 | py | Python | tests/integration/actions.py | jeffreymelvin-wf/aws-lambda-fsm-workflows | c96bc324be4e5fbd28c3a64d9d95bb8fc9b706e1 | [
"Apache-2.0"
] | null | null | null | tests/integration/actions.py | jeffreymelvin-wf/aws-lambda-fsm-workflows | c96bc324be4e5fbd28c3a64d9d95bb8fc9b706e1 | [
"Apache-2.0"
] | null | null | null | tests/integration/actions.py | jeffreymelvin-wf/aws-lambda-fsm-workflows | c96bc324be4e5fbd28c3a64d9d95bb8fc9b706e1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2018 Workiva Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aws_lambda_fsm.action import Action
class ReturnOK(Action):
    """Test action that succeeds with 'ok' unless configured to fail."""

    def execute(self, context, obj):
        # Fail deliberately when the current (step, retry) pair is listed
        # in the context's 'fail_at' configuration.
        current = [context.steps, context.retries]
        if current in context.get('fail_at', []):
            raise Exception()
        return 'ok'
class IncrementCounter(Action):
    """Test action that bumps a counter until it reaches 'loops'."""

    def execute(self, context, obj):
        # Fail deliberately when the current (step, retry) pair is listed
        # in the context's 'fail_at' configuration.
        current = [context.steps, context.retries]
        if current in context.get('fail_at', []):
            raise Exception()
        counter = context.get('counter', 0) + 1
        context['counter'] = counter
        return 'done' if counter >= context['loops'] else 'ok'
| 36.16129 | 74 | 0.695807 |
de1f68359217018510efbfbb7a55716131d0e8ed | 20,878 | py | Python | gamelib/cocos/actions/interval_actions.py | luciotorre/aiamsori | 521f3e16868326889caae9f8703ed042aead8817 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | gamelib/cocos/actions/interval_actions.py | luciotorre/aiamsori | 521f3e16868326889caae9f8703ed042aead8817 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | gamelib/cocos/actions/interval_actions.py | luciotorre/aiamsori | 521f3e16868326889caae9f8703ed042aead8817 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008 Daniel Moisset, Ricardo Quesada, Rayentray Tappa, Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Interval Action
Interval Actions
================
An interval action is an action that takes place within a certain period of time.
It has an start time, and a finish time. The finish time is the parameter
``duration`` plus the start time.
These `IntervalAction` have some interesting properties, like:
- They can run normally (default)
- They can run reversed with the `Reverse` action.
- They can run with the time altered with the `Accelerate`, `AccelDeccel` and
`Speed` actions.
For example, you can simulate a Ping Pong effect running the action normally and
then running it again in Reverse mode.
Example::
ping_pong_action = action + Reverse( action )
Available IntervalActions
=========================
* `MoveTo`
* `MoveBy`
* `JumpTo`
* `JumpBy`
* `Bezier`
* `Blink`
* `RotateTo`
* `RotateBy`
* `ScaleTo`
* `ScaleBy`
* `FadeOut`
* `FadeIn`
* `FadeTo`
* `Delay`
* `RandomDelay`
Modifier actions
================
* `Accelerate`
* `AccelDeccel`
* `Speed`
Examples::
move = MoveBy( (200,0), duration=5 ) # Moves 200 pixels to the right in 5 seconds.
move = MoveTo( (320,240), duration=5) # Moves to the pixel (320,240) in 5 seconds
jump = JumpBy( (320,0), 100, 5, duration=5) # Jumps to the right 320 pixels
# doing 5 jumps of 100 pixels
# of height in 5 seconds
accel_move = Accelerate(move) # accelerates action move
'''
__docformat__ = 'restructuredtext'
import random
import copy
import math
from base_actions import *
from cocos.euclid import *
__all__ = [ 'Lerp', # interpolation
'MoveTo','MoveBy', # movement actions
'Jump', 'JumpTo', 'JumpBy',
'Bezier', # complex movement actions
'Rotate',"RotateTo", "RotateBy", # object rotation
'ScaleTo','ScaleBy', # object scale
'Delay','RandomDelay', # Delays
'FadeOut','FadeIn','FadeTo', # Fades in/out action
'Blink', # Blink action
'Accelerate','AccelDeccel','Speed', # Time alter actions
]
class Lerp( IntervalAction ):
    """Linearly interpolates a named attribute of the target between two values."""

    def init(self, attrib, start, end, duration):
        """Init method.

        :Parameters:
            `attrib` : string
                Name of the target attribute to interpolate
            `start` : float
                Value at t = 0
            `end` : float
                Value at t = 1
            `duration` : float
                Duration time in seconds
        """
        self.attrib = attrib
        self.start_p = start
        self.end_p = end
        self.delta = end - start
        self.duration = duration

    def update(self, t):
        # value(t) = start + (end - start) * t
        new_value = self.start_p + self.delta * t
        setattr(self.target, self.attrib, new_value)

    def __reversed__(self):
        # The reverse simply swaps the endpoints.
        return Lerp(self.attrib, self.end_p, self.start_p, self.duration)
class RotateBy( IntervalAction ):
    """Rotates a `CocosNode` clockwise by a relative number of degrees.

    Example::

        # rotates the sprite 180 degrees in 2 seconds
        sprite.do( RotateBy( 180, 2 ) )
    """

    def init(self, angle, duration ):
        """Init method.

        :Parameters:
            `angle` : float
                Degrees to rotate; positive is clockwise
            `duration` : float
                Duration time in seconds
        """
        self.angle = angle          #: Quantity of degrees to rotate
        self.duration = duration    #: Duration in seconds

    def start( self ):
        # Remember where the node was pointing when the action kicked off.
        self.start_angle = self.target.rotation

    def update(self, t):
        rotation = self.start_angle + self.angle * t
        self.target.rotation = rotation % 360

    def __reversed__(self):
        return RotateBy(-self.angle, self.duration)

Rotate = RotateBy
class RotateTo( IntervalAction ):
    """Rotates a `CocosNode` object to a certain angle by modifying it's
    rotation attribute.
    The direction will be decided by the shortest angle.
    Example::
        # rotates the sprite to angle 180 in 2 seconds
        action = RotateTo( 180, 2 )
        sprite.do( action )
    """
    def init(self, angle, duration ):
        """Init method.
        :Parameters:
            `angle` : float
                Destination angle in degrees.
            `duration` : float
                Duration time in seconds
        """
        self.angle = angle%360 #: Destination angle in degrees
        self.duration = duration #: Duration in seconds
    def start( self ):
        ea = self.angle
        sa = self.start_angle = (self.target.rotation%360)
        # NOTE: from here on ``self.angle`` is reinterpreted as the *relative*
        # rotation needed to reach the destination, normalized into
        # (-180, 180] so the node takes the shortest path.
        self.angle = ((ea%360) - (sa%360))
        if self.angle > 180:
            self.angle = -360+self.angle
        if self.angle < -180:
            self.angle = 360+self.angle
    def update(self, t):
        self.target.rotation = (self.start_angle + self.angle * t ) % 360
    def __reversed__(self):
        # NOTE(review): once start() has run, ``self.angle`` holds a relative
        # delta, so the reverse behaves like a RotateBy in the opposite
        # direction rather than a rotation back to the original absolute
        # angle -- confirm this is the intended semantics.
        return RotateTo(-self.angle, self.duration)
class Speed( IntervalAction ):
    """Wraps another action and runs it faster (speed > 1) or slower (speed < 1).

    Example::

        # rotates the sprite 180 degrees clockwise in 1 second
        sprite.do( Speed( Rotate( 180, 2 ), 2 ) )
    """

    def init(self, other, speed ):
        """Init method.

        :Parameters:
            `other` : IntervalAction
                The action whose pace is altered
            `speed` : float
                1 is unchanged; 2 is twice as fast (half the time);
                0.5 is half as fast (double the time)
        """
        self.other = other
        self.speed = speed
        # Stretch or shrink the wrapped action's duration accordingly.
        self.duration = other.duration / speed

    def start(self):
        self.other.target = self.target
        self.other.start()

    def update(self, t):
        # ``t`` is already normalized to [0, 1], so it maps straight through.
        self.other.update( t )

    def __reversed__(self):
        return Speed( Reverse( self.other ), self.speed )
class Accelerate( IntervalAction ):
    """Warps another action's time so it starts slow and ends fast.

    Example::

        # rotate 180 degrees in 2 seconds, starting slow and ending fast
        sprite.do( Accelerate( Rotate( 180, 2 ), 4 ) )
    """

    def init(self, other, rate = 2):
        """Init method.

        :Parameters:
            `other` : IntervalAction
                The action that will be affected
            `rate` : float
                Exponent applied to time; 1 is linear (new t is t**rate)
        """
        self.other = other
        self.rate = rate
        self.duration = other.duration

    def start(self):
        self.other.target = self.target
        self.other.start()

    def update(self, t):
        self.other.update( t ** self.rate )

    def __reversed__(self):
        # Reversing inverts both the wrapped action and the exponent.
        return Accelerate(Reverse(self.other), 1.0 / self.rate)
class AccelDeccel( IntervalAction ):
    """Warps another action's time with an S-curve: slow, fast, slow.

    Example::

        # rotate 180 degrees in 2 seconds: slow start, fast middle, slow end
        sprite.do( AccelDeccel( RotateBy( 180, 2 ) ) )
    """

    def init(self, other):
        """Init method.

        :Parameters:
            `other` : IntervalAction
                The action that will be affected
        """
        self.other = other
        self.duration = other.duration

    def start(self):
        self.other.target = self.target
        self.other.start()

    def update(self, t):
        # Logistic (sigmoid) remap of t centered at 0.5; note the curve only
        # approaches 0 and 1 asymptotically at the endpoints.
        ft = (t - 0.5) * 12
        nt = 1. / (1. + math.exp(-ft))
        self.other.update( nt )

    def __reversed__(self):
        return AccelDeccel( Reverse(self.other) )
class MoveTo( IntervalAction ):
    """Moves a `CocosNode` to absolute coordinates x,y over a given time.

    Example::

        # Move the sprite to coords x=50, y=10 in 8 seconds
        sprite.do( MoveTo( (50,10), 8 ) )
    """

    def init(self, dst_coords, duration=5):
        """Init method.

        :Parameters:
            `dst_coords` : (x,y)
                Final coordinates of the sprite
            `duration` : float
                Duration time in seconds
        """
        self.end_position = Point2( *dst_coords )
        self.duration = duration

    def start( self ):
        # Capture where the node is now so update() can interpolate.
        self.start_position = self.target.position
        self.delta = self.end_position - self.start_position

    def update(self,t):
        self.target.position = self.start_position + self.delta * t
class MoveBy( MoveTo ):
    """Moves a `CocosNode` by a relative (x, y) offset over a given time.

    Example::

        # Move the sprite 50 pixels to the left in 8 seconds
        sprite.do( MoveBy( (-50,0), 8 ) )
    """

    def init(self, delta, duration=5):
        """Init method.

        :Parameters:
            `delta` : (x,y)
                Relative offset to travel
            `duration` : float
                Duration time in seconds
        """
        self.delta = Point2( *delta )
        self.duration = duration

    def start( self ):
        # Derive the absolute destination from the current position; the
        # inherited MoveTo.update() does the interpolation.
        self.start_position = self.target.position
        self.end_position = self.start_position + self.delta

    def __reversed__(self):
        return MoveBy(-self.delta, self.duration)
class FadeOut( IntervalAction ):
    """Fades out a `CocosNode` by driving its opacity from 255 down to 0.

    Example::

        sprite.do( FadeOut( 2 ) )
    """

    def init( self, duration ):
        """Init method.

        :Parameters:
            `duration` : float
                Seconds that it will take to fade
        """
        self.duration = duration

    def update( self, t ):
        self.target.opacity = 255 * (1 - t)

    def __reversed__(self):
        return FadeIn( self.duration )
class FadeTo( IntervalAction ):
    """Fades a `CocosNode` to a specific opacity value.

    Example::

        sprite.do( FadeTo( 128, 2 ) )
    """

    def init( self, alpha, duration ):
        """Init method.

        :Parameters:
            `alpha` : float
                Target opacity in the range 0-255
            `duration` : float
                Seconds that it will take to fade
        """
        self.alpha = alpha
        self.duration = duration

    def start(self):
        self.start_alpha = self.target.opacity

    def update( self, t ):
        # Interpolate from the opacity we started at toward the target alpha.
        change = self.alpha - self.start_alpha
        self.target.opacity = self.start_alpha + change * t
class FadeIn( FadeOut):
    """Fades in a `CocosNode` by driving its opacity from 0 up to 255.

    Example::

        sprite.do( FadeIn( 2 ) )
    """

    def update( self, t ):
        self.target.opacity = 255 * t

    def __reversed__(self):
        return FadeOut( self.duration )
class ScaleTo(IntervalAction):
    """Scales a `CocosNode` to an absolute zoom factor.

    Example::

        # scales the sprite to 5x in 2 seconds
        sprite.do( ScaleTo( 5, 2 ) )
    """

    def init(self, scale, duration=5 ):
        """Init method.

        :Parameters:
            `scale` : float
                Final scale factor
            `duration` : float
                Duration time in seconds
        """
        self.end_scale = scale
        self.duration = duration

    def start( self ):
        # Interpolate from the current scale to the absolute target.
        self.start_scale = self.target.scale
        self.delta = self.end_scale - self.start_scale

    def update(self, t):
        self.target.scale = self.start_scale + self.delta * t
class ScaleBy(ScaleTo):
    """Scales a `CocosNode` by a relative zoom factor.

    Example::

        # scales the sprite by 5x in 2 seconds
        sprite.do( ScaleBy( 5, 2 ) )
    """

    def start( self ):
        # Relative scaling: the destination is current_scale * factor, so
        # the delta spans from the current scale to that product.
        self.start_scale = self.target.scale
        self.delta = self.start_scale * self.end_scale - self.start_scale

    def __reversed__(self):
        return ScaleBy( 1.0 / self.end_scale, self.duration )
class Blink( IntervalAction ):
    """Toggles a `CocosNode`'s visibility a number of times.

    Example::

        # Blinks 10 times in 2 seconds
        sprite.do( Blink( 10, 2 ) )
    """

    def init(self, times, duration):
        """Init method.

        :Parameters:
            `times` : integer
                Number of times to blink
            `duration` : float
                Duration time in seconds
        """
        self.times = times
        self.duration = duration

    def update(self, t):
        # Each blink occupies an equal slice of normalized time; the node is
        # hidden during the first half of the slice and shown in the second.
        # FIX: renamed the local from ``slice``, which shadowed the builtin.
        slice_len = 1 / float( self.times )
        m = t % slice_len
        self.target.visible = (m > slice_len / 2.0)

    def __reversed__(self):
        # Blinking is symmetric in time.
        return self
class Bezier( IntervalAction ):
    """Moves a `CocosNode` along a bezier path.

    Example::

        action = Bezier( bezier_conf.path1, 5 )   # follow path1 in 5 seconds
        sprite.do( action )
    """

    def init(self, bezier, duration=5, forward=True):
        """Init method

        :Parameters:
            `bezier` : bezier_configuration instance
                A bezier configuration
            `duration` : float
                Duration time in seconds
        """
        self.duration = duration
        self.bezier = bezier
        self.forward = forward

    def start( self ):
        self.start_position = self.target.position

    def update(self,t):
        # Walk the curve forward or backward depending on orientation.
        curve_t = t if self.forward else 1 - t
        p = self.bezier.at( curve_t )
        self.target.position = ( self.start_position + Point2( *p ) )

    def __reversed__(self):
        return Bezier(self.bezier, self.duration, not self.forward)
class Jump(IntervalAction):
    """Moves a `CocosNode` simulating a jump movement.

    Deprecated in favor of `JumpBy`.

    Example::

        action = Jump(50,200, 5, 6)    # 200 px to the right in 6 seconds,
        sprite.do( action )            # doing 5 jumps 50 px high
    """

    def init(self, y=150, x=120, jumps=1, duration=5):
        """Init method

        :Parameters:
            `y` : integer
                Height of jumps
            `x` : integer
                horizontal movement relative to the startin position
            `jumps` : integer
                quantity of jumps
            `duration` : float
                Duration time in seconds
        """
        import warnings
        warnings.warn('Deprecated "Jump" action. Consider using JumpBy instead', DeprecationWarning)
        self.y = y
        self.x = x
        self.duration = duration
        self.jumps = jumps

    def start( self ):
        self.start_position = self.target.position

    def update(self, t):
        # Vertical part: rectified sine wave gives ``jumps`` arches of height
        # ``y``; horizontal part advances linearly toward ``x``.
        dy = int( self.y * abs( math.sin( t * math.pi * self.jumps ) ) )
        dx = self.x * t
        self.target.position = self.start_position + Point2(dx, dy)

    def __reversed__(self):
        return Jump(self.y, -self.x, self.jumps, self.duration)
class JumpBy(IntervalAction):
    """Moves a `CocosNode` by a relative offset while simulating jumps.

    Example::

        # Move the sprite 100 px right and 100 px up in 6 seconds,
        # doing 5 jumps of 200 px of height
        sprite.do( JumpBy((100,100), 200, 5, 6) )
    """

    def init(self, position=(0,0), height=100, jumps=1, duration=5):
        """Init method

        :Parameters:
            `position` : integer x integer tuple
                horizontal and vertical movement relative to the
                starting position
            `height` : integer
                Height of jumps
            `jumps` : integer
                quantity of jumps
            `duration` : float
                Duration time in seconds
        """
        self.position = position
        self.height = height
        self.duration = duration
        self.jumps = jumps

    def start( self ):
        self.start_position = self.target.position
        self.delta = Vector2(*self.position)

    def update(self, t):
        # Arc height from a rectified sine wave, plus linear travel toward
        # the relative destination.
        y = int( self.height * abs( math.sin( t * math.pi * self.jumps ) ) )
        y += self.delta[1] * t
        x = self.delta[0] * t
        self.target.position = self.start_position + Point2(x, y)

    def __reversed__(self):
        return JumpBy( (-self.position[0], -self.position[1]), self.height, self.jumps, self.duration)
class JumpTo(JumpBy):
    """Moves a `CocosNode` to an absolute position while simulating jumps.

    Example::

        action = JumpTo(50,200, 5, 6)   # jump to the destination in 6 seconds
        sprite.do( action )             # doing 5 jumps of 50 px of height
    """

    def start( self ):
        # Unlike JumpBy, ``position`` is absolute: the travel delta is the
        # vector from the node's current position to that destination.
        self.start_position = self.target.position
        self.delta = Vector2(*self.position) - self.start_position
class Delay(IntervalAction):
    """Does nothing for a fixed number of seconds.

    Example::

        sprite.do( Delay(2.5) )
    """

    def init(self, delay):
        """Init method

        :Parameters:
            `delay` : float
                Seconds of delay
        """
        self.duration = delay

    def __reversed__(self):
        # Waiting is symmetric in time.
        return self
class RandomDelay(Delay):
    """Delays a random amount of time between *low* and *hi* seconds.

    Example::

        sprite.do( RandomDelay(2.5, 4.5) )   # wait between 2.5 and 4.5 s
    """

    def init(self, low, hi):
        """Init method

        :Parameters:
            `low` : float
                Minimun seconds of delay
            `hi` : float
                Maximun seconds of delay
        """
        self.low = low
        self.hi = hi

    def __deepcopy__(self, memo):
        # Each copy of the action rolls its own concrete duration, so every
        # use of the template delays a fresh random amount.
        new = copy.copy(self)
        new.duration = self.low + (random.random() * (self.hi - self.low))
        return new
19a28e3c99f742edbea91d52cb0519bbf2ab37d8 | 754 | py | Python | qc3/formats/skp/skp_const.py | wtfo-guru/queconverter | fc3529e46d5af1d90840c52ed9f58fb3c255523b | [
"BSD-2-Clause"
] | null | null | null | qc3/formats/skp/skp_const.py | wtfo-guru/queconverter | fc3529e46d5af1d90840c52ed9f58fb3c255523b | [
"BSD-2-Clause"
] | null | null | null | qc3/formats/skp/skp_const.py | wtfo-guru/queconverter | fc3529e46d5af1d90840c52ed9f58fb3c255523b | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 by Igor E. Novikov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
SKP_ID = "##sK1 palette"
| 39.684211 | 75 | 0.733422 |
f94dc3ad5b7ee1dcac4981e1cfacda4617102236 | 3,610 | py | Python | app/recipe/views.py | jvaras05/recipe-app-api | 4641dc5472167624a1777f98948a7adbe6ae3e0b | [
"MIT"
] | null | null | null | app/recipe/views.py | jvaras05/recipe-app-api | 4641dc5472167624a1777f98948a7adbe6ae3e0b | [
"MIT"
] | null | null | null | app/recipe/views.py | jvaras05/recipe-app-api | 4641dc5472167624a1777f98948a7adbe6ae3e0b | [
"MIT"
] | null | null | null | from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework import viewsets, mixins, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from core.models import Tag, Ingredient, Recipe
from recipe import serializers
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
                            mixins.ListModelMixin,
                            mixins.CreateModelMixin):
    """Shared list/create behaviour for user-owned recipe attributes.

    Subclasses only need to supply ``queryset`` and ``serializer_class``.
    """

    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Limit results to the requesting user's objects.

        When the ``assigned_only`` query parameter is truthy, further
        restrict to attributes linked to at least one recipe.
        """
        qs = self.queryset
        if bool(int(self.request.query_params.get('assigned_only', 0))):
            qs = qs.filter(recipe__isnull=False)
        return (
            qs.filter(user=self.request.user)
            .order_by('-name')
            .distinct()
        )

    def perform_create(self, serializer):
        """Record the requesting user as owner when saving a new object."""
        serializer.save(user=self.request.user)
class TagViewSet(BaseRecipeAttrViewSet):
    """Manage tags in the database (list and create, per-user)."""
    # All filtering/auth behaviour is inherited from BaseRecipeAttrViewSet.
    queryset = Tag.objects.all()
    serializer_class = serializers.TagSerializer
class IngredientViewSet(BaseRecipeAttrViewSet):
    """Manage ingredients in the database (list and create, per-user)."""
    # All filtering/auth behaviour is inherited from BaseRecipeAttrViewSet.
    queryset = Ingredient.objects.all()
    serializer_class = serializers.IngredientSerializer
class RecipeViewSet(viewsets.ModelViewSet):
    """Full CRUD endpoints for recipes, scoped to the authenticated user."""

    serializer_class = serializers.RecipeSerializer
    queryset = Recipe.objects.all()
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def _params_to_ints(self, qs):
        """Turn a comma separated string of IDs into a list of ints."""
        return list(map(int, qs.split(',')))

    def get_queryset(self):
        """Return the user's recipes, optionally filtered by tag/ingredient IDs."""
        recipes = self.queryset
        tag_param = self.request.query_params.get('tags')
        ingredient_param = self.request.query_params.get('ingredients')
        if tag_param:
            recipes = recipes.filter(
                tags__id__in=self._params_to_ints(tag_param)
            )
        if ingredient_param:
            recipes = recipes.filter(
                ingredients__id__in=self._params_to_ints(ingredient_param)
            )
        return recipes.filter(user=self.request.user)

    def get_serializer_class(self):
        """Pick the serializer matching the current action."""
        if self.action == 'retrieve':
            return serializers.RecipeDetailSerializer
        if self.action == 'upload_image':
            return serializers.RecipeImageSerializer
        return self.serializer_class

    def perform_create(self, serializer):
        """Record the requesting user as the recipe owner."""
        serializer.save(user=self.request.user)

    @action(methods=['POST'], detail=True, url_path='upload-image')
    def upload_image(self, request, pk=None):
        """Attach an uploaded image to the recipe identified by ``pk``."""
        recipe = self.get_object()
        serializer = self.get_serializer(recipe, data=request.data)
        # Guard clause: reject invalid payloads before touching the model.
        if not serializer.is_valid():
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
        serializer.save()
        return Response(
            serializer.data,
            status=status.HTTP_200_OK
        )
| 33.738318 | 74 | 0.665928 |
d178b044606cd38325456fc4293c4f95ee768d22 | 4,498 | py | Python | venv/lib/python3.6/site-packages/celery/concurrency/base.py | kuldeep24680/recurring_payments | 79e589c3d3f4fb1a0791725065e2c068750ef6b2 | [
"MIT"
] | 13 | 2018-03-28T23:07:01.000Z | 2022-03-12T06:01:21.000Z | newenv/lib/python3.8/site-packages/celery/concurrency/base.py | palakshivlani-11/cryptorium | eebb78c061007519e527b3d18b8df6bc13679c46 | [
"Apache-2.0"
] | 11 | 2018-06-18T15:49:07.000Z | 2021-11-25T01:45:33.000Z | newenv/lib/python3.8/site-packages/celery/concurrency/base.py | palakshivlani-11/cryptorium | eebb78c061007519e527b3d18b8df6bc13679c46 | [
"Apache-2.0"
] | 5 | 2018-03-28T23:07:05.000Z | 2021-12-09T19:02:00.000Z | # -*- coding: utf-8 -*-
"""Base Execution Pool."""
from __future__ import absolute_import, unicode_literals
import logging
import os
import sys
from billiard.einfo import ExceptionInfo
from billiard.exceptions import WorkerLostError
from kombu.utils.encoding import safe_repr
from celery.exceptions import WorkerShutdown, WorkerTerminate
from celery.five import monotonic, reraise
from celery.utils import timer2
from celery.utils.log import get_logger
from celery.utils.text import truncate
__all__ = ('BasePool', 'apply_target')
logger = get_logger('celery.pool')
def apply_target(target, args=(), kwargs=None, callback=None,
                 accept_callback=None, pid=None, getpid=os.getpid,
                 propagate=(), monotonic=monotonic, **_):
    """Apply function within pool context.

    Arguments:
        target: callable to invoke.
        args/kwargs: positional and keyword arguments for ``target``.
        callback: called with the return value on success, or with an
            ``ExceptionInfo`` when ``target`` died from an unexpected
            ``BaseException``.
        accept_callback: if given, called with ``(pid, timestamp)`` just
            before ``target`` is invoked.
        pid: worker pid reported to ``accept_callback``
            (defaults to ``getpid()``).
        propagate: tuple of exception types that must bubble up to the
            caller instead of being handled here.
    """
    kwargs = {} if not kwargs else kwargs
    if accept_callback:
        accept_callback(pid or getpid(), monotonic())
    try:
        ret = target(*args, **kwargs)
    except propagate:
        # Caller explicitly asked for these to propagate unchanged.
        raise
    except Exception:
        # Ordinary exceptions are re-raised for the pool machinery to handle.
        raise
    except (WorkerShutdown, WorkerTerminate):
        # Controlled shutdown/termination must not be masked.
        # NOTE(review): reachable only if these derive from BaseException
        # rather than Exception -- confirm in celery.exceptions.
        raise
    except BaseException as exc:
        # Any other BaseException (e.g. SystemExit) is converted into a
        # WorkerLostError carrying the original traceback, then delivered
        # to ``callback`` as an ExceptionInfo instead of propagating.
        try:
            reraise(WorkerLostError, WorkerLostError(repr(exc)),
                    sys.exc_info()[2])
        except WorkerLostError:
            callback(ExceptionInfo())
    else:
        callback(ret)
class BasePool(object):
    """Task pool.

    Base class defining the lifecycle (``start``/``stop``/``close``/
    ``terminate``) and the ``on_*`` hooks that concrete pool
    implementations override.
    """

    # Lifecycle states stored in ``_state``.
    RUN = 0x1
    CLOSE = 0x2
    TERMINATE = 0x3

    Timer = timer2.Timer

    #: set to true if the pool can be shutdown from within
    #: a signal handler.
    signal_safe = True

    #: set to true if pool uses greenlets.
    is_green = False

    # Current lifecycle state (RUN/CLOSE/TERMINATE), None before start().
    _state = None
    _pool = None
    # Cached logger.isEnabledFor(DEBUG) result, refreshed in start().
    _does_debug = True

    #: only used by multiprocessing pool
    uses_semaphore = False

    task_join_will_block = True
    body_can_be_buffer = False

    def __init__(self, limit=None, putlocks=True, forking_enable=True,
                 callbacks_propagate=(), app=None, **options):
        self.limit = limit
        self.putlocks = putlocks
        self.options = options
        self.forking_enable = forking_enable
        self.callbacks_propagate = callbacks_propagate
        self.app = app

    def on_start(self):
        """Hook called by :meth:`start`; override to set up the pool."""
        pass

    def did_start_ok(self):
        """Return True if the pool started without problems (hook)."""
        return True

    def flush(self):
        """Hook: flush any outstanding work (no-op by default)."""
        pass

    def on_stop(self):
        """Hook called by :meth:`stop` before the state changes."""
        pass

    def register_with_event_loop(self, loop):
        """Hook: attach pool file descriptors/timers to *loop* (no-op)."""
        pass

    def on_apply(self, *args, **kwargs):
        """Hook: actually submit a task; called by :meth:`apply_async`."""
        pass

    def on_terminate(self):
        """Hook called by :meth:`terminate` after the state changes."""
        pass

    def on_soft_timeout(self, job):
        """Hook: *job* exceeded its soft time limit (no-op by default)."""
        pass

    def on_hard_timeout(self, job):
        """Hook: *job* exceeded its hard time limit (no-op by default)."""
        pass

    def maintain_pool(self, *args, **kwargs):
        """Hook: periodic pool maintenance (no-op by default)."""
        pass

    def terminate_job(self, pid, signal=None):
        """Forcefully terminate the job running in worker *pid* (abstract)."""
        raise NotImplementedError(
            '{0} does not implement kill_job'.format(type(self)))

    def restart(self):
        """Restart the pool (abstract)."""
        raise NotImplementedError(
            '{0} does not implement restart'.format(type(self)))

    def stop(self):
        """Stop the pool: run :meth:`on_stop`, then mark as TERMINATE."""
        self.on_stop()
        self._state = self.TERMINATE

    def terminate(self):
        """Terminate the pool: mark TERMINATE first, then run the hook.

        Note the ordering differs from :meth:`stop` -- here the state is
        set *before* the hook runs.
        """
        self._state = self.TERMINATE
        self.on_terminate()

    def start(self):
        """Start the pool and mark it as running."""
        self._does_debug = logger.isEnabledFor(logging.DEBUG)
        self.on_start()
        self._state = self.RUN

    def close(self):
        """Close the pool to new tasks, then run :meth:`on_close`."""
        self._state = self.CLOSE
        self.on_close()

    def on_close(self):
        """Hook called by :meth:`close` after the state changes."""
        pass

    def apply_async(self, target, args=None, kwargs=None, **options):
        """Equivalent of the :func:`apply` built-in function.

        Callbacks should optimally return as soon as possible since
        otherwise the thread which handles the result will get blocked.
        """
        kwargs = {} if not kwargs else kwargs
        args = [] if not args else args
        if self._does_debug:
            # Args/kwargs reprs are truncated so huge payloads don't flood
            # the log.
            logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)',
                         target, truncate(safe_repr(args), 1024),
                         truncate(safe_repr(kwargs), 1024))
        return self.on_apply(target, args, kwargs,
                             waitforslot=self.putlocks,
                             callbacks_propagate=self.callbacks_propagate,
                             **options)

    def _get_info(self):
        # Overridden by subclasses to add implementation-specific stats.
        return {
            'max-concurrency': self.limit,
        }

    @property
    def info(self):
        """Statistics mapping describing this pool."""
        return self._get_info()

    @property
    def active(self):
        """True while the pool is in the RUN state."""
        return self._state == self.RUN

    @property
    def num_processes(self):
        """Number of worker processes (equals the configured limit)."""
        return self.limit
| 25.556818 | 74 | 0.616496 |
d4eaa05c745f19f3c738f06c02d6b9e88861e467 | 3,988 | py | Python | platform.py | grahamjamesaddis/platform-teensy | 68536c188e262c7fa9674fb25715e397334ce069 | [
"Apache-2.0"
] | null | null | null | platform.py | grahamjamesaddis/platform-teensy | 68536c188e262c7fa9674fb25715e397334ce069 | [
"Apache-2.0"
] | null | null | null | platform.py | grahamjamesaddis/platform-teensy | 68536c188e262c7fa9674fb25715e397334ce069 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import platform
from platformio.managers.platform import PlatformBase
class TeensyPlatform(PlatformBase):
    """PlatformIO platform definition for Teensy boards.

    Prunes unused toolchain/J-Link packages based on the selected board
    and injects default J-Link debug-server configuration.
    """

    def configure_default_packages(self, variables, targets):
        """Drop packages not needed for the selected board/framework.

        Teensy ARM cores keep the GCC ARM toolchain, AVR-based cores keep
        the AVR toolchain; the J-Link tool is removed unless some option
        mentions it.
        """
        if variables.get("board"):
            board_config = self.board_config(variables.get("board"))
            # Keep exactly one toolchain: ARM for the "teensy" core,
            # otherwise AVR.
            del_toolchain = "toolchain-gccarmnoneeabi"
            if board_config.get("build.core") != "teensy":
                del_toolchain = "toolchain-atmelavr"
            if del_toolchain in self.packages:
                del self.packages[del_toolchain]
            # mbed builds require a specific (older) GCC ARM release.
            if "mbed" in variables.get("pioframework", []):
                self.packages["toolchain-gccarmnoneeabi"][
                    "version"] = ">=1.60301.0,<1.80000.0"

        # configure J-LINK tool
        # Keep tool-jlink only if jlink shows up in any upload/debug option.
        jlink_conds = [
            "jlink" in variables.get(option, "")
            for option in ("upload_protocol", "debug_tool")
        ]
        if variables.get("board"):
            board_config = self.board_config(variables.get("board"))
            jlink_conds.extend([
                "jlink" in board_config.get(key, "")
                for key in ("debug.default_tools", "upload.protocol")
            ])
        jlink_pkgname = "tool-jlink"
        if not any(jlink_conds) and jlink_pkgname in self.packages:
            del self.packages[jlink_pkgname]

        return PlatformBase.configure_default_packages(
            self, variables, targets)

    def get_boards(self, id_=None):
        """Return board definition(s) with default debug tools injected.

        With ``id_`` a single board is returned; otherwise the whole
        mapping is processed in place.
        """
        result = PlatformBase.get_boards(self, id_)
        if not result:
            return result
        if id_:
            return self._add_default_debug_tools(result)
        else:
            for key, value in result.items():
                result[key] = self._add_default_debug_tools(result[key])
        return result

    def _add_default_debug_tools(self, board):
        """Add a default J-Link GDB server entry to *board*'s manifest.

        Only applies when the board supports jlink upload and has no
        explicit jlink tool configured; requires ``debug.jlink_device``.
        """
        debug = board.manifest.get("debug", {})
        upload_protocols = board.manifest.get("upload", {}).get(
            "protocols", [])
        if "tools" not in debug:
            debug["tools"] = {}

        if "jlink" in upload_protocols and "jlink" not in debug["tools"]:
            assert debug.get("jlink_device"), (
                "Missed J-Link Device ID for %s" % board.id)
            debug["tools"]["jlink"] = {
                "server": {
                    "package": "tool-jlink",
                    "arguments": [
                        "-singlerun",
                        "-if", "SWD",
                        "-select", "USB",
                        "-device", debug.get("jlink_device"),
                        "-port", "2331"
                    ],
                    # The CL (command line) binary is the Windows name.
                    "executable": ("JLinkGDBServerCL.exe"
                                   if platform.system() == "Windows" else
                                   "JLinkGDBServer")
                }
            }
        board.manifest["debug"] = debug
        return board

    def configure_debug_options(self, initial_debug_options, ide_data):
        """Return debug options with the adapter speed applied for J-Link.

        The input mapping is deep-copied and never mutated.
        """
        debug_options = copy.deepcopy(initial_debug_options)
        server_executable = debug_options["server"]["executable"].lower()
        adapter_speed = initial_debug_options.get("speed")
        if adapter_speed:
            # Only the J-Link server understands the -speed argument.
            if "jlink" in server_executable:
                debug_options["server"]["arguments"].extend(
                    ["-speed", adapter_speed]
                )
        return debug_options
| 37.980952 | 74 | 0.575978 |
c215241d808bb5124545be69639f3a51d5700b13 | 3,962 | py | Python | nuitka/tools/release/bump/__main__.py | zegervdv/Nuitka | ef1b62fecb634c51befede8da218c22f127836e9 | [
"Apache-2.0"
] | 1 | 2021-07-05T03:05:05.000Z | 2021-07-05T03:05:05.000Z | nuitka/tools/release/bump/__main__.py | ztessler/Nuitka | 04c9a5471b702a0e5f28398f2661c93b83ab0d1a | [
"Apache-2.0"
] | null | null | null | nuitka/tools/release/bump/__main__.py | ztessler/Nuitka | 04c9a5471b702a0e5f28398f2661c93b83ab0d1a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Make version bump for Nuitka. """
from __future__ import print_function
import sys
from optparse import OptionParser
from nuitka.tools.Basics import goHome
from nuitka.tools.release.Debian import updateDebianChangelog
from nuitka.tools.release.Release import getBranchName
def getBumpedVersion(mode, old_version):
    """Compute the next Nuitka version string for the given bump *mode*.

    Modes:
        prerelease -- bump the rc counter, or start a new "X.Y.Z+1rc1".
        release    -- drop any rc suffix; bump the patch digit if the old
                      version was already a final release.
        hotfix     -- append ".1" to a plain release, or bump an existing
                      fourth hotfix digit.

    Exits with an error message for an unknown mode.
    """
    if mode == "prerelease":
        if "rc" in old_version:
            # Already a candidate: just increment the rc counter.
            base, _sep, candidate = old_version.partition("rc")
            return base + "rc" + str(int(candidate) + 1)

        # Fresh candidate: bump the patch level of the three-digit base.
        digits = old_version.split(".")[:3]
        digits[-1] = str(int(digits[-1]) + 1)
        return ".".join(digits) + "rc1"

    if mode == "release":
        rc_pos = old_version.find("rc")
        had_rc = rc_pos != -1
        if had_rc:
            old_version = old_version[:rc_pos]

        digits = old_version.split(".")[:3]
        # Releasing an rc keeps its number; re-releasing a final bumps it.
        if not had_rc:
            digits[-1] = str(int(digits[-1]) + 1)
        return ".".join(digits)

    if mode == "hotfix":
        assert "pre" not in old_version and "rc" not in old_version

        digits = old_version.split(".")
        if len(digits) == 3:
            digits.append("1")
        else:
            digits[-1] = str(int(digits[-1]) + 1)
        return ".".join(digits)

    sys.exit("Error, unknown mode '%s'." % mode)
def main():
    """Bump the Nuitka version in-place and update the Debian changelog.

    Determines the bump mode from ``--mode`` or, by default, from the
    current git branch name, rewrites ``nuitka/Version.py`` with the new
    version, and records it in the Debian changelog.
    """
    parser = OptionParser()

    parser.add_option(
        "--mode",
        action="store",
        dest="mode",
        default=None,
        help="""\
The mode of update, prerelease, hotfix, release, auto (default auto determines from branch).""",
    )

    options, positional_args = parser.parse_args()

    if positional_args:
        parser.print_help()

        sys.exit("\nError, no positional argument allowed.")

    # Go its own directory, to have it easy with path knowledge.
    goHome()

    with open("nuitka/Version.py") as f:
        option_lines = f.readlines()

    # Exactly one "Nuitka V..." line is expected; unpacking enforces it.
    (version_line,) = [line for line in option_lines if line.startswith("Nuitka V")]

    # Strip the "Nuitka V" prefix (8 characters) and trailing newline.
    old_version = version_line[8:].rstrip()

    mode = options.mode
    branch_name = getBranchName()

    # Auto-detect the bump mode from the git-flow style branch name.
    if mode is None:
        if branch_name.startswith("hotfix/"):
            mode = "hotfix"
        elif branch_name == "master" or branch_name.startswith("release/"):
            mode = "release"
        elif branch_name == "develop":
            mode = "prerelease"
        else:
            sys.exit("Error, cannot detect mode from branch name '%s'." % branch_name)

    new_version = getBumpedVersion(mode, old_version)
    print("Bumped", mode, old_version, "->", new_version)

    # Rewrite the version file, replacing only the version line.
    with open("nuitka/Version.py", "w") as options_file:
        for line in option_lines:
            if line.startswith("Nuitka V"):
                line = "Nuitka V" + new_version + "\n"

            options_file.write(line)

    # Debian is currently in freeze, change to "unstable" once that changes.
    updateDebianChangelog(old_version, new_version, "experimental")
| 30.476923 | 96 | 0.615851 |
69e5bc99522ebdc3fe9c6e29d6f69328f93626ec | 66,888 | py | Python | Lib/test/test_bytes.py | vic/pysano | bcfd0522711efaaacf68821b831674b0ff48b6a1 | [
"PSF-2.0"
] | 4 | 2016-04-02T00:01:50.000Z | 2017-07-13T02:11:04.000Z | Lib/test/test_bytes.py | vic/pysano | bcfd0522711efaaacf68821b831674b0ff48b6a1 | [
"PSF-2.0"
] | null | null | null | Lib/test/test_bytes.py | vic/pysano | bcfd0522711efaaacf68821b831674b0ff48b6a1 | [
"PSF-2.0"
] | null | null | null | """Unit tests for the bytes and bytearray types.
XXX This is a mess. Common tests should be moved to buffer_tests.py,
which itself ought to be unified with string_tests.py (and the latter
should be modernized).
"""
import os
import re
import sys
import copy
import functools
import pickle
import tempfile
import unittest
import test.support
import test.string_tests
import test.buffer_tests
import test.list_tests
from test.support import bigaddrspacetest, MAX_Py_ssize_t
# ``check_bytes_warnings`` is defined conditionally: when the interpreter
# runs with -b/-bb (sys.flags.bytes_warning), wrapped tests must see the
# expected BytesWarning; otherwise the decorator is a no-op.
if sys.flags.bytes_warning:
    def check_bytes_warnings(func):
        """Decorator asserting the wrapped test emits BytesWarning."""
        @functools.wraps(func)
        def wrapper(*args, **kw):
            with test.support.check_warnings(('', BytesWarning)):
                return func(*args, **kw)
        return wrapper
else:
    # no-op
    def check_bytes_warnings(func):
        return func
class Indexable:
    """Helper whose ``__index__`` yields a preset integer.

    Used to verify that bytes/bytearray accept any object implementing
    the index protocol, not just ``int``.
    """

    def __init__(self, value=0):
        self.value = value

    def __index__(self):
        return self.value
class BaseBytesTest:
def test_basics(self):
b = self.type2test()
self.assertEqual(type(b), self.type2test)
self.assertEqual(b.__class__, self.type2test)
def test_copy(self):
a = self.type2test(b"abcd")
for copy_method in (copy.copy, copy.deepcopy):
b = copy_method(a)
self.assertEqual(a, b)
self.assertEqual(type(a), type(b))
def test_empty_sequence(self):
b = self.type2test()
self.assertEqual(len(b), 0)
self.assertRaises(IndexError, lambda: b[0])
self.assertRaises(IndexError, lambda: b[1])
self.assertRaises(IndexError, lambda: b[sys.maxsize])
self.assertRaises(IndexError, lambda: b[sys.maxsize+1])
self.assertRaises(IndexError, lambda: b[10**100])
self.assertRaises(IndexError, lambda: b[-1])
self.assertRaises(IndexError, lambda: b[-2])
self.assertRaises(IndexError, lambda: b[-sys.maxsize])
self.assertRaises(IndexError, lambda: b[-sys.maxsize-1])
self.assertRaises(IndexError, lambda: b[-sys.maxsize-2])
self.assertRaises(IndexError, lambda: b[-10**100])
def test_from_list(self):
ints = list(range(256))
b = self.type2test(i for i in ints)
self.assertEqual(len(b), 256)
self.assertEqual(list(b), ints)
def test_from_index(self):
b = self.type2test([Indexable(), Indexable(1), Indexable(254),
Indexable(255)])
self.assertEqual(list(b), [0, 1, 254, 255])
self.assertRaises(ValueError, self.type2test, [Indexable(-1)])
self.assertRaises(ValueError, self.type2test, [Indexable(256)])
def test_from_ssize(self):
self.assertEqual(self.type2test(0), b'')
self.assertEqual(self.type2test(1), b'\x00')
self.assertEqual(self.type2test(5), b'\x00\x00\x00\x00\x00')
self.assertRaises(ValueError, self.type2test, -1)
self.assertEqual(self.type2test('0', 'ascii'), b'0')
self.assertEqual(self.type2test(b'0'), b'0')
self.assertRaises(OverflowError, self.type2test, sys.maxsize + 1)
def test_constructor_type_errors(self):
self.assertRaises(TypeError, self.type2test, 0.0)
class C:
pass
self.assertRaises(TypeError, self.type2test, ["0"])
self.assertRaises(TypeError, self.type2test, [0.0])
self.assertRaises(TypeError, self.type2test, [None])
self.assertRaises(TypeError, self.type2test, [C()])
self.assertRaises(TypeError, self.type2test, 0, 'ascii')
self.assertRaises(TypeError, self.type2test, b'', 'ascii')
self.assertRaises(TypeError, self.type2test, 0, errors='ignore')
self.assertRaises(TypeError, self.type2test, b'', errors='ignore')
self.assertRaises(TypeError, self.type2test, '')
self.assertRaises(TypeError, self.type2test, '', errors='ignore')
self.assertRaises(TypeError, self.type2test, '', b'ascii')
self.assertRaises(TypeError, self.type2test, '', 'ascii', b'ignore')
def test_constructor_value_errors(self):
self.assertRaises(ValueError, self.type2test, [-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize-2])
self.assertRaises(ValueError, self.type2test, [-10**100])
self.assertRaises(ValueError, self.type2test, [256])
self.assertRaises(ValueError, self.type2test, [257])
self.assertRaises(ValueError, self.type2test, [sys.maxsize])
self.assertRaises(ValueError, self.type2test, [sys.maxsize+1])
self.assertRaises(ValueError, self.type2test, [10**100])
@bigaddrspacetest
def test_constructor_overflow(self):
size = MAX_Py_ssize_t
self.assertRaises((OverflowError, MemoryError), self.type2test, size)
try:
# Should either pass or raise an error (e.g. on debug builds with
# additional malloc() overhead), but shouldn't crash.
bytearray(size - 4)
except (OverflowError, MemoryError):
pass
def test_compare(self):
b1 = self.type2test([1, 2, 3])
b2 = self.type2test([1, 2, 3])
b3 = self.type2test([1, 3])
self.assertEqual(b1, b2)
self.assertTrue(b2 != b3)
self.assertTrue(b1 <= b2)
self.assertTrue(b1 <= b3)
self.assertTrue(b1 < b3)
self.assertTrue(b1 >= b2)
self.assertTrue(b3 >= b2)
self.assertTrue(b3 > b2)
self.assertFalse(b1 != b2)
self.assertFalse(b2 == b3)
self.assertFalse(b1 > b2)
self.assertFalse(b1 > b3)
self.assertFalse(b1 >= b3)
self.assertFalse(b1 < b2)
self.assertFalse(b3 < b2)
self.assertFalse(b3 <= b2)
@check_bytes_warnings
def test_compare_to_str(self):
# Byte comparisons with unicode should always fail!
# Test this for all expected byte orders and Unicode character
# sizes.
self.assertEqual(self.type2test(b"\0a\0b\0c") == "abc", False)
self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == "abc",
False)
self.assertEqual(self.type2test(b"a\0b\0c\0") == "abc", False)
self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == "abc",
False)
self.assertEqual(self.type2test() == str(), False)
self.assertEqual(self.type2test() != str(), True)
def test_reversed(self):
input = list(map(ord, "Hello"))
b = self.type2test(input)
output = list(reversed(b))
input.reverse()
self.assertEqual(output, input)
def test_getslice(self):
def by(s):
return self.type2test(map(ord, s))
b = by("Hello, world")
self.assertEqual(b[:5], by("Hello"))
self.assertEqual(b[1:5], by("ello"))
self.assertEqual(b[5:7], by(", "))
self.assertEqual(b[7:], by("world"))
self.assertEqual(b[7:12], by("world"))
self.assertEqual(b[7:100], by("world"))
self.assertEqual(b[:-7], by("Hello"))
self.assertEqual(b[-11:-7], by("ello"))
self.assertEqual(b[-7:-5], by(", "))
self.assertEqual(b[-5:], by("world"))
self.assertEqual(b[-5:12], by("world"))
self.assertEqual(b[-5:100], by("world"))
self.assertEqual(b[-100:5], by("Hello"))
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
L = list(range(255))
b = self.type2test(L)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step]))
def test_encoding(self):
sample = "Hello world\n\u1234\u5678\u9abc"
for enc in ("utf-8", "utf-16"):
b = self.type2test(sample, enc)
self.assertEqual(b, self.type2test(sample.encode(enc)))
self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin-1")
b = self.type2test(sample, "latin-1", "ignore")
self.assertEqual(b, self.type2test(sample[:-3], "utf-8"))
def test_decode(self):
sample = "Hello world\n\u1234\u5678\u9abc\def0\def0"
for enc in ("utf-8", "utf-16"):
b = self.type2test(sample, enc)
self.assertEqual(b.decode(enc), sample)
sample = "Hello world\n\x80\x81\xfe\xff"
b = self.type2test(sample, "latin-1")
self.assertRaises(UnicodeDecodeError, b.decode, "utf-8")
self.assertEqual(b.decode("utf-8", "ignore"), "Hello world\n")
self.assertEqual(b.decode(errors="ignore", encoding="utf-8"),
"Hello world\n")
# Default encoding is utf-8
self.assertEqual(self.type2test(b'\xe2\x98\x83').decode(), '\u2603')
def test_from_int(self):
b = self.type2test(0)
self.assertEqual(b, self.type2test())
b = self.type2test(10)
self.assertEqual(b, self.type2test([0]*10))
b = self.type2test(10000)
self.assertEqual(b, self.type2test([0]*10000))
def test_concat(self):
b1 = self.type2test(b"abc")
b2 = self.type2test(b"def")
self.assertEqual(b1 + b2, b"abcdef")
self.assertEqual(b1 + bytes(b"def"), b"abcdef")
self.assertEqual(bytes(b"def") + b1, b"defabc")
self.assertRaises(TypeError, lambda: b1 + "def")
self.assertRaises(TypeError, lambda: "abc" + b2)
def test_repeat(self):
for b in b"abc", self.type2test(b"abc"):
self.assertEqual(b * 3, b"abcabcabc")
self.assertEqual(b * 0, b"")
self.assertEqual(b * -1, b"")
self.assertRaises(TypeError, lambda: b * 3.14)
self.assertRaises(TypeError, lambda: 3.14 * b)
# XXX Shouldn't bytes and bytearray agree on what to raise?
with self.assertRaises((OverflowError, MemoryError)):
c = b * sys.maxsize
with self.assertRaises((OverflowError, MemoryError)):
b *= sys.maxsize
def test_repeat_1char(self):
self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100))
def test_contains(self):
b = self.type2test(b"abc")
self.assertIn(ord('a'), b)
self.assertIn(int(ord('a')), b)
self.assertNotIn(200, b)
self.assertRaises(ValueError, lambda: 300 in b)
self.assertRaises(ValueError, lambda: -1 in b)
self.assertRaises(TypeError, lambda: None in b)
self.assertRaises(TypeError, lambda: float(ord('a')) in b)
self.assertRaises(TypeError, lambda: "a" in b)
for f in bytes, bytearray:
self.assertIn(f(b""), b)
self.assertIn(f(b"a"), b)
self.assertIn(f(b"b"), b)
self.assertIn(f(b"c"), b)
self.assertIn(f(b"ab"), b)
self.assertIn(f(b"bc"), b)
self.assertIn(f(b"abc"), b)
self.assertNotIn(f(b"ac"), b)
self.assertNotIn(f(b"d"), b)
self.assertNotIn(f(b"dab"), b)
self.assertNotIn(f(b"abd"), b)
def test_fromhex(self):
self.assertRaises(TypeError, self.type2test.fromhex)
self.assertRaises(TypeError, self.type2test.fromhex, 1)
self.assertEqual(self.type2test.fromhex(''), self.type2test())
b = bytearray([0x1a, 0x2b, 0x30])
self.assertEqual(self.type2test.fromhex('1a2B30'), b)
self.assertEqual(self.type2test.fromhex(' 1A 2B 30 '), b)
self.assertEqual(self.type2test.fromhex('0000'), b'\0\0')
self.assertRaises(TypeError, self.type2test.fromhex, b'1B')
self.assertRaises(ValueError, self.type2test.fromhex, 'a')
self.assertRaises(ValueError, self.type2test.fromhex, 'rt')
self.assertRaises(ValueError, self.type2test.fromhex, '1a b cd')
self.assertRaises(ValueError, self.type2test.fromhex, '\x00')
self.assertRaises(ValueError, self.type2test.fromhex, '12 \x00 34')
for data, pos in (
# invalid first hexadecimal character
('12 x4 56', 3),
# invalid second hexadecimal character
('12 3x 56', 4),
# two invalid hexadecimal characters
('12 xy 56', 3),
# test non-ASCII string
('12 3\xff 56', 4),
):
with self.assertRaises(ValueError) as cm:
self.type2test.fromhex(data)
self.assertIn('at position %s' % pos, str(cm.exception))
def test_hex(self):
self.assertRaises(TypeError, self.type2test.hex)
self.assertRaises(TypeError, self.type2test.hex, 1)
self.assertEqual(self.type2test(b"").hex(), "")
self.assertEqual(bytearray([0x1a, 0x2b, 0x30]).hex(), '1a2b30')
self.assertEqual(self.type2test(b"\x1a\x2b\x30").hex(), '1a2b30')
self.assertEqual(memoryview(b"\x1a\x2b\x30").hex(), '1a2b30')
def test_join(self):
self.assertEqual(self.type2test(b"").join([]), b"")
self.assertEqual(self.type2test(b"").join([b""]), b"")
for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]:
lst = list(map(self.type2test, lst))
self.assertEqual(self.type2test(b"").join(lst), b"abc")
self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc")
self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc")
dot_join = self.type2test(b".:").join
self.assertEqual(dot_join([b"ab", b"cd"]), b"ab.:cd")
self.assertEqual(dot_join([memoryview(b"ab"), b"cd"]), b"ab.:cd")
self.assertEqual(dot_join([b"ab", memoryview(b"cd")]), b"ab.:cd")
self.assertEqual(dot_join([bytearray(b"ab"), b"cd"]), b"ab.:cd")
self.assertEqual(dot_join([b"ab", bytearray(b"cd")]), b"ab.:cd")
# Stress it with many items
seq = [b"abc"] * 1000
expected = b"abc" + b".:abc" * 999
self.assertEqual(dot_join(seq), expected)
self.assertRaises(TypeError, self.type2test(b" ").join, None)
# Error handling and cleanup when some item in the middle of the
# sequence has the wrong type.
with self.assertRaises(TypeError):
dot_join([bytearray(b"ab"), "cd", b"ef"])
with self.assertRaises(TypeError):
dot_join([memoryview(b"ab"), "cd", b"ef"])
def test_count(self):
b = self.type2test(b'mississippi')
i = 105
p = 112
w = 119
self.assertEqual(b.count(b'i'), 4)
self.assertEqual(b.count(b'ss'), 2)
self.assertEqual(b.count(b'w'), 0)
self.assertEqual(b.count(i), 4)
self.assertEqual(b.count(w), 0)
self.assertEqual(b.count(b'i', 6), 2)
self.assertEqual(b.count(b'p', 6), 2)
self.assertEqual(b.count(b'i', 1, 3), 1)
self.assertEqual(b.count(b'p', 7, 9), 1)
self.assertEqual(b.count(i, 6), 2)
self.assertEqual(b.count(p, 6), 2)
self.assertEqual(b.count(i, 1, 3), 1)
self.assertEqual(b.count(p, 7, 9), 1)
def test_startswith(self):
b = self.type2test(b'hello')
self.assertFalse(self.type2test().startswith(b"anything"))
self.assertTrue(b.startswith(b"hello"))
self.assertTrue(b.startswith(b"hel"))
self.assertTrue(b.startswith(b"h"))
self.assertFalse(b.startswith(b"hellow"))
self.assertFalse(b.startswith(b"ha"))
with self.assertRaises(TypeError) as cm:
b.startswith([b'h'])
exc = str(cm.exception)
self.assertIn('bytes', exc)
self.assertIn('tuple', exc)
def test_endswith(self):
b = self.type2test(b'hello')
self.assertFalse(bytearray().endswith(b"anything"))
self.assertTrue(b.endswith(b"hello"))
self.assertTrue(b.endswith(b"llo"))
self.assertTrue(b.endswith(b"o"))
self.assertFalse(b.endswith(b"whello"))
self.assertFalse(b.endswith(b"no"))
with self.assertRaises(TypeError) as cm:
b.endswith([b'o'])
exc = str(cm.exception)
self.assertIn('bytes', exc)
self.assertIn('tuple', exc)
def test_find(self):
b = self.type2test(b'mississippi')
i = 105
w = 119
self.assertEqual(b.find(b'ss'), 2)
self.assertEqual(b.find(b'w'), -1)
self.assertEqual(b.find(b'mississippian'), -1)
self.assertEqual(b.find(i), 1)
self.assertEqual(b.find(w), -1)
self.assertEqual(b.find(b'ss', 3), 5)
self.assertEqual(b.find(b'ss', 1, 7), 2)
self.assertEqual(b.find(b'ss', 1, 3), -1)
self.assertEqual(b.find(i, 6), 7)
self.assertEqual(b.find(i, 1, 3), 1)
self.assertEqual(b.find(w, 1, 3), -1)
for index in (-1, 256, sys.maxsize + 1):
self.assertRaisesRegex(
ValueError, r'byte must be in range\(0, 256\)',
b.find, index)
def test_rfind(self):
b = self.type2test(b'mississippi')
i = 105
w = 119
self.assertEqual(b.rfind(b'ss'), 5)
self.assertEqual(b.rfind(b'w'), -1)
self.assertEqual(b.rfind(b'mississippian'), -1)
self.assertEqual(b.rfind(i), 10)
self.assertEqual(b.rfind(w), -1)
self.assertEqual(b.rfind(b'ss', 3), 5)
self.assertEqual(b.rfind(b'ss', 0, 6), 2)
self.assertEqual(b.rfind(i, 1, 3), 1)
self.assertEqual(b.rfind(i, 3, 9), 7)
self.assertEqual(b.rfind(w, 1, 3), -1)
def test_index(self):
b = self.type2test(b'mississippi')
i = 105
w = 119
self.assertEqual(b.index(b'ss'), 2)
self.assertRaises(ValueError, b.index, b'w')
self.assertRaises(ValueError, b.index, b'mississippian')
self.assertEqual(b.index(i), 1)
self.assertRaises(ValueError, b.index, w)
self.assertEqual(b.index(b'ss', 3), 5)
self.assertEqual(b.index(b'ss', 1, 7), 2)
self.assertRaises(ValueError, b.index, b'ss', 1, 3)
self.assertEqual(b.index(i, 6), 7)
self.assertEqual(b.index(i, 1, 3), 1)
self.assertRaises(ValueError, b.index, w, 1, 3)
def test_rindex(self):
b = self.type2test(b'mississippi')
i = 105
w = 119
self.assertEqual(b.rindex(b'ss'), 5)
self.assertRaises(ValueError, b.rindex, b'w')
self.assertRaises(ValueError, b.rindex, b'mississippian')
self.assertEqual(b.rindex(i), 10)
self.assertRaises(ValueError, b.rindex, w)
self.assertEqual(b.rindex(b'ss', 3), 5)
self.assertEqual(b.rindex(b'ss', 0, 6), 2)
self.assertEqual(b.rindex(i, 1, 3), 1)
self.assertEqual(b.rindex(i, 3, 9), 7)
self.assertRaises(ValueError, b.rindex, w, 1, 3)
def test_mod(self):
b = b'hello, %b!'
orig = b
b = b % b'world'
self.assertEqual(b, b'hello, world!')
self.assertEqual(orig, b'hello, %b!')
self.assertFalse(b is orig)
b = b'%s / 100 = %d%%'
a = b % (b'seventy-nine', 79)
self.assertEqual(a, b'seventy-nine / 100 = 79%')
def test_imod(self):
b = b'hello, %b!'
orig = b
b %= b'world'
self.assertEqual(b, b'hello, world!')
self.assertEqual(orig, b'hello, %b!')
self.assertFalse(b is orig)
b = b'%s / 100 = %d%%'
b %= (b'seventy-nine', 79)
self.assertEqual(b, b'seventy-nine / 100 = 79%')
def test_replace(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.replace(b'i', b'a'), b'massassappa')
self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi')
def test_split(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.split(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.split(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.split(b'w'), [b])
# with keyword args
b = self.type2test(b'a|b|c|d')
self.assertEqual(b.split(sep=b'|'), [b'a', b'b', b'c', b'd'])
self.assertEqual(b.split(b'|', maxsplit=1), [b'a', b'b|c|d'])
self.assertEqual(b.split(sep=b'|', maxsplit=1), [b'a', b'b|c|d'])
self.assertEqual(b.split(maxsplit=1, sep=b'|'), [b'a', b'b|c|d'])
b = self.type2test(b'a b c d')
self.assertEqual(b.split(maxsplit=1), [b'a', b'b c d'])
def test_split_whitespace(self):
    """split() with no separator splits on ASCII whitespace runs only."""
    for b in (b'  arf  barf  ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
              b'arf\fbarf', b'arf\vbarf'):
        b = self.type2test(b)
        self.assertEqual(b.split(), [b'arf', b'barf'])
        self.assertEqual(b.split(None), [b'arf', b'barf'])
        self.assertEqual(b.split(None, 2), [b'arf', b'barf'])
    # Control characters 0x1C-0x1F are NOT whitespace for bytes.split().
    for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'):
        b = self.type2test(b)
        self.assertEqual(b.split(), [b])
    # maxsplit interacts with leading/trailing whitespace stripping.
    self.assertEqual(self.type2test(b'  a  bb  c  ').split(None, 0), [b'a  bb  c  '])
    self.assertEqual(self.type2test(b'  a  bb  c  ').split(None, 1), [b'a', b'bb  c  '])
    self.assertEqual(self.type2test(b'  a  bb  c  ').split(None, 2), [b'a', b'bb', b'c  '])
    self.assertEqual(self.type2test(b'  a  bb  c  ').split(None, 3), [b'a', b'bb', b'c'])
def test_split_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').split, ' ')
def test_split_unicodewhitespace(self):
    """Bytes 0x1C-0x1F (Unicode-only whitespace) are not split boundaries."""
    b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
    self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f'])
def test_rsplit(self):
    """rsplit() on explicit separators; maxsplit counts from the right."""
    b = self.type2test(b'mississippi')
    self.assertEqual(b.rsplit(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
    self.assertEqual(b.rsplit(b'ss'), [b'mi', b'i', b'ippi'])
    self.assertEqual(b.rsplit(b'w'), [b])
    # with keyword args
    b = self.type2test(b'a|b|c|d')
    self.assertEqual(b.rsplit(sep=b'|'), [b'a', b'b', b'c', b'd'])
    self.assertEqual(b.rsplit(b'|', maxsplit=1), [b'a|b|c', b'd'])
    self.assertEqual(b.rsplit(sep=b'|', maxsplit=1), [b'a|b|c', b'd'])
    self.assertEqual(b.rsplit(maxsplit=1, sep=b'|'), [b'a|b|c', b'd'])
    b = self.type2test(b'a b c d')
    self.assertEqual(b.rsplit(maxsplit=1), [b'a b c', b'd'])
def test_rsplit_whitespace(self):
    """rsplit() with no separator splits on whitespace, from the right."""
    for b in (b'  arf  barf  ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
              b'arf\fbarf', b'arf\vbarf'):
        b = self.type2test(b)
        self.assertEqual(b.rsplit(), [b'arf', b'barf'])
        self.assertEqual(b.rsplit(None), [b'arf', b'barf'])
        self.assertEqual(b.rsplit(None, 2), [b'arf', b'barf'])
    # maxsplit limits splits starting from the right end.
    self.assertEqual(self.type2test(b'  a  bb  c  ').rsplit(None, 0), [b'  a  bb  c'])
    self.assertEqual(self.type2test(b'  a  bb  c  ').rsplit(None, 1), [b'  a  bb', b'c'])
    self.assertEqual(self.type2test(b'  a  bb  c  ').rsplit(None, 2), [b'  a', b'bb', b'c'])
    self.assertEqual(self.type2test(b'  a  bb  c  ').rsplit(None, 3), [b'a', b'bb', b'c'])
def test_rsplit_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').rsplit, ' ')
def test_rsplit_unicodewhitespace(self):
    """rsplit() also treats 0x1C-0x1F as non-whitespace for bytes."""
    b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
    self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f'])
def test_partition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi'))
self.assertEqual(b.partition(b'w'), (b'mississippi', b'', b''))
def test_rpartition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi'))
self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b''))
self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi'))
def test_pickling(self):
    """Round-trip through pickle at every protocol preserves the value."""
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
            b = self.type2test(b)
            ps = pickle.dumps(b, proto)
            q = pickle.loads(ps)
            self.assertEqual(b, q)
def test_iterator_pickling(self):
    """Pickling an iterator preserves its type and remaining position."""
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
            it = itorg = iter(self.type2test(b))
            data = list(self.type2test(b))
            d = pickle.dumps(it, proto)
            it = pickle.loads(d)
            self.assertEqual(type(itorg), type(it))
            self.assertEqual(list(it), data)

            # Re-load and advance one step before pickling again, so the
            # restored iterator must resume from index 1.
            it = pickle.loads(d)
            if not b:
                continue
            next(it)
            d = pickle.dumps(it, proto)
            it = pickle.loads(d)
            self.assertEqual(list(it), data[1:])
def test_strip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.strip(b'i'), b'mississipp')
self.assertEqual(b.strip(b'm'), b'ississippi')
self.assertEqual(b.strip(b'pi'), b'mississ')
self.assertEqual(b.strip(b'im'), b'ssissipp')
self.assertEqual(b.strip(b'pim'), b'ssiss')
self.assertEqual(b.strip(b), b'')
def test_lstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.lstrip(b'i'), b'mississippi')
self.assertEqual(b.lstrip(b'm'), b'ississippi')
self.assertEqual(b.lstrip(b'pi'), b'mississippi')
self.assertEqual(b.lstrip(b'im'), b'ssissippi')
self.assertEqual(b.lstrip(b'pim'), b'ssissippi')
def test_rstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rstrip(b'i'), b'mississipp')
self.assertEqual(b.rstrip(b'm'), b'mississippi')
self.assertEqual(b.rstrip(b'pi'), b'mississ')
self.assertEqual(b.rstrip(b'im'), b'mississipp')
self.assertEqual(b.rstrip(b'pim'), b'mississ')
def test_strip_whitespace(self):
    """With no argument, strip/lstrip/rstrip remove ASCII whitespace."""
    b = self.type2test(b' \t\n\r\f\vabc \t\n\r\f\v')
    self.assertEqual(b.strip(), b'abc')
    self.assertEqual(b.lstrip(), b'abc \t\n\r\f\v')
    self.assertEqual(b.rstrip(), b' \t\n\r\f\vabc')
def test_strip_bytearray(self):
    """strip family accepts any bytes-like strip set (here a memoryview)."""
    self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b')
    self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc')
    self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab')
def test_strip_string_error(self):
    """A str strip set is rejected with TypeError for all three methods."""
    self.assertRaises(TypeError, self.type2test(b'abc').strip, 'b')
    self.assertRaises(TypeError, self.type2test(b'abc').lstrip, 'b')
    self.assertRaises(TypeError, self.type2test(b'abc').rstrip, 'b')
def test_center(self):
# Fill character can be either bytes or bytearray (issue 12380)
b = self.type2test(b'abc')
for fill_type in (bytes, bytearray):
self.assertEqual(b.center(7, fill_type(b'-')),
self.type2test(b'--abc--'))
def test_ljust(self):
    # Fill character can be either bytes or bytearray (issue 12380)
    b = self.type2test(b'abc')
    for fill_type in (bytes, bytearray):
        self.assertEqual(b.ljust(7, fill_type(b'-')),
                         self.type2test(b'abc----'))
def test_rjust(self):
    # Fill character can be either bytes or bytearray (issue 12380)
    b = self.type2test(b'abc')
    for fill_type in (bytes, bytearray):
        self.assertEqual(b.rjust(7, fill_type(b'-')),
                         self.type2test(b'----abc'))
def test_ord(self):
b = self.type2test(b'\0A\x7f\x80\xff')
self.assertEqual([ord(b[i:i+1]) for i in range(len(b))],
[0, 65, 127, 128, 255])
def test_maketrans(self):
    """maketrans() builds a 256-byte table; lengths must match, bytes only."""
    # Identity table with positions of a/b/c remapped to x/y/z.
    transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
    self.assertEqual(self.type2test.maketrans(b'abc', b'xyz'), transtable)
    # Same idea at the top of the byte range (\375-\377 -> x/y/z).
    transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374xyz'
    self.assertEqual(self.type2test.maketrans(b'\375\376\377', b'xyz'), transtable)
    # Mismatched lengths and str arguments are rejected.
    self.assertRaises(ValueError, self.type2test.maketrans, b'abc', b'xyzq')
    self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def')
def test_none_arguments(self):
    """None is accepted for start/stop in find/index/count/... (issue 11828)."""
    b = self.type2test(b'hello')
    l = self.type2test(b'l')
    h = self.type2test(b'h')
    x = self.type2test(b'x')
    o = self.type2test(b'o')

    self.assertEqual(2, b.find(l, None))
    self.assertEqual(3, b.find(l, -2, None))
    self.assertEqual(2, b.find(l, None, -2))
    self.assertEqual(0, b.find(h, None, None))

    self.assertEqual(3, b.rfind(l, None))
    self.assertEqual(3, b.rfind(l, -2, None))
    self.assertEqual(2, b.rfind(l, None, -2))
    self.assertEqual(0, b.rfind(h, None, None))

    self.assertEqual(2, b.index(l, None))
    self.assertEqual(3, b.index(l, -2, None))
    self.assertEqual(2, b.index(l, None, -2))
    self.assertEqual(0, b.index(h, None, None))

    self.assertEqual(3, b.rindex(l, None))
    self.assertEqual(3, b.rindex(l, -2, None))
    self.assertEqual(2, b.rindex(l, None, -2))
    self.assertEqual(0, b.rindex(h, None, None))

    self.assertEqual(2, b.count(l, None))
    self.assertEqual(1, b.count(l, -2, None))
    self.assertEqual(1, b.count(l, None, -2))
    self.assertEqual(0, b.count(x, None, None))

    self.assertEqual(True, b.endswith(o, None))
    self.assertEqual(True, b.endswith(o, -2, None))
    self.assertEqual(True, b.endswith(l, None, -2))
    self.assertEqual(False, b.endswith(x, None, None))

    self.assertEqual(True, b.startswith(h, None))
    self.assertEqual(True, b.startswith(l, -2, None))
    self.assertEqual(True, b.startswith(h, None, -2))
    self.assertEqual(False, b.startswith(x, None, None))
def test_integer_arguments_out_of_byte_range(self):
b = self.type2test(b'hello')
for method in (b.count, b.find, b.index, b.rfind, b.rindex):
self.assertRaises(ValueError, method, -1)
self.assertRaises(ValueError, method, 256)
self.assertRaises(ValueError, method, 9999)
def test_find_etc_raise_correct_error_messages(self):
    """Too many arguments must name the offending method (issue 11828)."""
    b = self.type2test(b'hello')
    x = self.type2test(b'x')
    # Each method is called with one argument too many; the TypeError
    # message must mention the method's own name.
    self.assertRaisesRegex(TypeError, r'\bfind\b', b.find,
                           x, None, None, None)
    self.assertRaisesRegex(TypeError, r'\brfind\b', b.rfind,
                           x, None, None, None)
    self.assertRaisesRegex(TypeError, r'\bindex\b', b.index,
                           x, None, None, None)
    self.assertRaisesRegex(TypeError, r'\brindex\b', b.rindex,
                           x, None, None, None)
    self.assertRaisesRegex(TypeError, r'\bcount\b', b.count,
                           x, None, None, None)
    self.assertRaisesRegex(TypeError, r'\bstartswith\b', b.startswith,
                           x, None, None, None)
    self.assertRaisesRegex(TypeError, r'\bendswith\b', b.endswith,
                           x, None, None, None)
def test_free_after_iterating(self):
    """Freeing the object while iterators are alive must not crash."""
    test.support.check_free_after_iterating(self, iter, self.type2test)
    test.support.check_free_after_iterating(self, reversed, self.type2test)
class BytesTest(BaseBytesTest, unittest.TestCase):
    """Run the shared BaseBytesTest suite against the immutable ``bytes``
    type, plus bytes-only behaviour: read-only buffer semantics, the
    ``__bytes__`` protocol, and the C-level ``PyBytes_FromFormat()``.
    """
    type2test = bytes

    def test_getitem_error(self):
        """Indexing bytes with a non-int, non-slice raises TypeError."""
        msg = "byte indices must be integers or slices"
        with self.assertRaisesRegex(TypeError, msg):
            b'python'['a']

    def test_buffer_is_readonly(self):
        """A bytes object is rejected as the target of readinto()."""
        fd = os.open(__file__, os.O_RDONLY)
        with open(fd, "rb", buffering=0) as f:
            self.assertRaises(TypeError, f.readinto, b"")

    def test_custom(self):
        """bytes() honours the __bytes__ protocol, including error cases
        and result-type preservation."""
        class A:
            def __bytes__(self):
                return b'abc'
        self.assertEqual(bytes(A()), b'abc')
        # No __bytes__ and not iterable/buffer-like: TypeError.
        class A: pass
        self.assertRaises(TypeError, bytes, A())
        # __bytes__ returning a non-bytes value: TypeError.
        class A:
            def __bytes__(self):
                return None
        self.assertRaises(TypeError, bytes, A())
        # __bytes__ takes precedence over __index__.
        class A:
            def __bytes__(self):
                return b'a'
            def __index__(self):
                return 42
        self.assertEqual(bytes(A()), b'a')
        # Issue #25766: on a str subclass, __bytes__ wins unless an
        # explicit encoding is supplied.
        class A(str):
            def __bytes__(self):
                return b'abc'
        self.assertEqual(bytes(A('\u20ac')), b'abc')
        self.assertEqual(bytes(A('\u20ac'), 'iso8859-15'), b'\xa4')
        # Issue #24731: the exact type returned by __bytes__ is preserved.
        class A:
            def __bytes__(self):
                return OtherBytesSubclass(b'abc')
        self.assertEqual(bytes(A()), b'abc')
        self.assertIs(type(bytes(A())), OtherBytesSubclass)
        self.assertEqual(BytesSubclass(A()), b'abc')
        self.assertIs(type(BytesSubclass(A())), BytesSubclass)

    # Test PyBytes_FromFormat()
    def test_from_format(self):
        """Exercise the C API PyBytes_FromFormat() through ctypes:
        basic formatters, pointer formatting, integer limits, width and
        precision, '%%', and invalid/partial formats."""
        ctypes = test.support.import_module('ctypes')
        _testcapi = test.support.import_module('_testcapi')
        from ctypes import pythonapi, py_object
        from ctypes import (
            c_int, c_uint,
            c_long, c_ulong,
            c_size_t, c_ssize_t,
            c_char_p)

        PyBytes_FromFormat = pythonapi.PyBytes_FromFormat
        PyBytes_FromFormat.restype = py_object

        # basic tests
        self.assertEqual(PyBytes_FromFormat(b'format'),
                         b'format')
        self.assertEqual(PyBytes_FromFormat(b'Hello %s !', b'world'),
                         b'Hello world !')

        # test formatters
        self.assertEqual(PyBytes_FromFormat(b'c=%c', c_int(0)),
                         b'c=\0')
        self.assertEqual(PyBytes_FromFormat(b'c=%c', c_int(ord('@'))),
                         b'c=@')
        self.assertEqual(PyBytes_FromFormat(b'c=%c', c_int(255)),
                         b'c=\xff')
        self.assertEqual(PyBytes_FromFormat(b'd=%d ld=%ld zd=%zd',
                                            c_int(1), c_long(2),
                                            c_size_t(3)),
                         b'd=1 ld=2 zd=3')
        self.assertEqual(PyBytes_FromFormat(b'd=%d ld=%ld zd=%zd',
                                            c_int(-1), c_long(-2),
                                            c_size_t(-3)),
                         b'd=-1 ld=-2 zd=-3')
        self.assertEqual(PyBytes_FromFormat(b'u=%u lu=%lu zu=%zu',
                                            c_uint(123), c_ulong(456),
                                            c_size_t(789)),
                         b'u=123 lu=456 zu=789')
        self.assertEqual(PyBytes_FromFormat(b'i=%i', c_int(123)),
                         b'i=123')
        self.assertEqual(PyBytes_FromFormat(b'i=%i', c_int(-123)),
                         b'i=-123')
        self.assertEqual(PyBytes_FromFormat(b'x=%x', c_int(0xabc)),
                         b'x=abc')

        sizeof_ptr = ctypes.sizeof(c_char_p)

        if os.name == 'nt':
            # Windows (MSCRT): %p is zero-padded uppercase hex.
            ptr_format = '0x%0{}X'.format(2 * sizeof_ptr)
            def ptr_formatter(ptr):
                return (ptr_format % ptr)
        else:
            # UNIX (glibc): %p is '0x...' lowercase hex.
            def ptr_formatter(ptr):
                return '%#x' % ptr

        ptr = 0xabcdef
        self.assertEqual(PyBytes_FromFormat(b'ptr=%p', c_char_p(ptr)),
                         ('ptr=' + ptr_formatter(ptr)).encode('ascii'))
        self.assertEqual(PyBytes_FromFormat(b's=%s', c_char_p(b'cstr')),
                         b's=cstr')

        # test minimum and maximum integer values
        size_max = c_size_t(-1).value
        for formatstr, ctypes_type, value, py_formatter in (
            (b'%d', c_int, _testcapi.INT_MIN, str),
            (b'%d', c_int, _testcapi.INT_MAX, str),
            (b'%ld', c_long, _testcapi.LONG_MIN, str),
            (b'%ld', c_long, _testcapi.LONG_MAX, str),
            (b'%lu', c_ulong, _testcapi.ULONG_MAX, str),
            (b'%zd', c_ssize_t, _testcapi.PY_SSIZE_T_MIN, str),
            (b'%zd', c_ssize_t, _testcapi.PY_SSIZE_T_MAX, str),
            (b'%zu', c_size_t, size_max, str),
            (b'%p', c_char_p, size_max, ptr_formatter),
        ):
            # NOTE: a stray trailing comma here used to wrap this call in a
            # throwaway 1-tuple; removed (the assertion itself was unaffected).
            self.assertEqual(PyBytes_FromFormat(formatstr, ctypes_type(value)),
                             py_formatter(value).encode('ascii'))

        # width and precision (width is currently ignored)
        self.assertEqual(PyBytes_FromFormat(b'%5s', b'a'),
                         b'a')
        self.assertEqual(PyBytes_FromFormat(b'%.3s', b'abcdef'),
                         b'abc')

        # '%%' formatter
        self.assertEqual(PyBytes_FromFormat(b'%%'),
                         b'%')
        self.assertEqual(PyBytes_FromFormat(b'[%%]'),
                         b'[%]')
        self.assertEqual(PyBytes_FromFormat(b'%%%c', c_int(ord('_'))),
                         b'%_')
        self.assertEqual(PyBytes_FromFormat(b'%%s'),
                         b'%s')

        # Invalid formats and partial formatting
        self.assertEqual(PyBytes_FromFormat(b'%'), b'%')
        self.assertEqual(PyBytes_FromFormat(b'x=%i y=%', c_int(2), c_int(3)),
                         b'x=2 y=%')

        # Issue #19969: %c must raise OverflowError for values
        # not in the range [0; 255]
        self.assertRaises(OverflowError,
                          PyBytes_FromFormat, b'%c', c_int(-1))
        self.assertRaises(OverflowError,
                          PyBytes_FromFormat, b'%c', c_int(256))
class ByteArrayTest(BaseBytesTest, unittest.TestCase):
    """Run the shared BaseBytesTest suite against mutable ``bytearray``,
    plus bytearray-only behaviour: in-place mutation, slice assignment,
    buffer-export resize restrictions, and iterator pickling of a
    mutable target.
    """
    type2test = bytearray

    def test_getitem_error(self):
        """Indexing with a non-int, non-slice raises TypeError."""
        msg = "bytearray indices must be integers or slices"
        with self.assertRaisesRegex(TypeError, msg):
            bytearray(b'python')['a']

    def test_setitem_error(self):
        """Item assignment with a non-int, non-slice index raises TypeError."""
        msg = "bytearray indices must be integers or slices"
        with self.assertRaisesRegex(TypeError, msg):
            b = bytearray(b'python')
            b['a'] = "python"

    def test_nohash(self):
        # bytearray is mutable and therefore unhashable.
        self.assertRaises(TypeError, hash, bytearray())

    def test_bytearray_api(self):
        """bytearray works as a read/write buffer for binary file I/O."""
        short_sample = b"Hello world\n"
        sample = short_sample + b"\0"*(20 - len(short_sample))
        tfn = tempfile.mktemp()
        try:
            # Prepare
            with open(tfn, "wb") as f:
                f.write(short_sample)
            # Test readinto
            with open(tfn, "rb") as f:
                b = bytearray(20)
                n = f.readinto(b)
            self.assertEqual(n, len(short_sample))
            self.assertEqual(list(b), list(sample))
            # Test writing in binary mode
            with open(tfn, "wb") as f:
                f.write(b)
            with open(tfn, "rb") as f:
                self.assertEqual(f.read(), sample)
            # Text mode is ambiguous; don't test
        finally:
            try:
                os.remove(tfn)
            except OSError:
                pass

    def test_reverse(self):
        """reverse() is in-place and returns None."""
        b = bytearray(b'hello')
        self.assertEqual(b.reverse(), None)
        self.assertEqual(b, b'olleh')
        b = bytearray(b'hello1') # test even number of items
        b.reverse()
        self.assertEqual(b, b'1olleh')
        b = bytearray()
        b.reverse()
        self.assertFalse(b)

    def test_clear(self):
        """clear() empties in place; the object stays usable afterwards."""
        b = bytearray(b'python')
        b.clear()
        self.assertEqual(b, b'')

        b = bytearray(b'')
        b.clear()
        self.assertEqual(b, b'')

        b = bytearray(b'')
        b.append(ord('r'))
        b.clear()
        b.append(ord('p'))
        self.assertEqual(b, b'p')

    def test_copy(self):
        """copy() returns an equal but distinct bytearray."""
        b = bytearray(b'abc')
        bb = b.copy()
        self.assertEqual(bb, b'abc')

        b = bytearray(b'')
        bb = b.copy()
        self.assertEqual(bb, b'')

        # test that it's indeed a copy and not a reference
        b = bytearray(b'abc')
        bb = b.copy()
        self.assertEqual(b, bb)
        self.assertIsNot(b, bb)
        bb.append(ord('d'))
        self.assertEqual(bb, b'abcd')
        self.assertEqual(b, b'abc')

    def test_regexps(self):
        """The re module accepts bytearray subjects with bytes patterns."""
        def by(s):
            return bytearray(map(ord, s))
        b = by("Hello, world")
        self.assertEqual(re.findall(br"\w+", b), [by("Hello"), by("world")])

    def test_setitem(self):
        """Item assignment accepts ints and __index__ objects in 0..255."""
        b = bytearray([1, 2, 3])
        b[1] = 100
        self.assertEqual(b, bytearray([1, 100, 3]))
        b[-1] = 200
        self.assertEqual(b, bytearray([1, 100, 200]))
        b[0] = Indexable(10)
        self.assertEqual(b, bytearray([10, 100, 200]))
        try:
            b[3] = 0
            self.fail("Didn't raise IndexError")
        except IndexError:
            pass
        try:
            b[-10] = 0
            self.fail("Didn't raise IndexError")
        except IndexError:
            pass
        try:
            b[0] = 256
            self.fail("Didn't raise ValueError")
        except ValueError:
            pass
        try:
            b[0] = Indexable(-1)
            self.fail("Didn't raise ValueError")
        except ValueError:
            pass
        try:
            b[0] = None
            self.fail("Didn't raise TypeError")
        except TypeError:
            pass

    def test_delitem(self):
        """del supports positive, negative and middle indices."""
        b = bytearray(range(10))
        del b[0]
        self.assertEqual(b, bytearray(range(1, 10)))
        del b[-1]
        self.assertEqual(b, bytearray(range(1, 9)))
        del b[4]
        self.assertEqual(b, bytearray([1, 2, 3, 4, 6, 7, 8]))

    def test_setslice(self):
        """Slice assignment: shrink, grow, insert, delete, bytes-like RHS."""
        b = bytearray(range(10))
        self.assertEqual(list(b), list(range(10)))

        b[0:5] = bytearray([1, 1, 1, 1, 1])
        self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9]))

        del b[0:-5]
        self.assertEqual(b, bytearray([5, 6, 7, 8, 9]))

        b[0:0] = bytearray([0, 1, 2, 3, 4])
        self.assertEqual(b, bytearray(range(10)))

        b[-7:-3] = bytearray([100, 101])
        self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 8, 9]))

        b[3:5] = [3, 4, 5, 6]
        self.assertEqual(b, bytearray(range(10)))

        b[3:0] = [42, 42, 42]
        self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9]))

        b[3:] = b'foo'
        self.assertEqual(b, bytearray([0, 1, 2, 102, 111, 111]))

        b[:3] = memoryview(b'foo')
        self.assertEqual(b, bytearray([102, 111, 111, 102, 111, 111]))

        b[3:4] = []
        self.assertEqual(b, bytearray([102, 111, 111, 111, 111]))

        # Invalid RHS types and out-of-range integer items.
        for elem in [5, -5, 0, int(10e20), 'str', 2.3,
                     ['a', 'b'], [b'a', b'b'], [[]]]:
            with self.assertRaises(TypeError):
                b[3:4] = elem

        for elem in [[254, 255, 256], [-256, 9000]]:
            with self.assertRaises(ValueError):
                b[3:4] = elem

    def test_setslice_extend(self):
        # Exercise the resizing logic (see issue #19087)
        b = bytearray(range(100))
        self.assertEqual(list(b), list(range(100)))
        del b[:10]
        self.assertEqual(list(b), list(range(10, 100)))
        b.extend(range(100, 110))
        self.assertEqual(list(b), list(range(10, 110)))

    def test_fifo_overrun(self):
        # Test for issue #23985, a buffer overrun when implementing a FIFO
        # Build Python in pydebug mode for best results.
        b = bytearray(10)
        b.pop()        # Defeat expanding buffer off-by-one quirk
        del b[:1]      # Advance start pointer without reallocating
        b += bytes(2)  # Append exactly the number of deleted bytes
        del b          # Free memory buffer, allowing pydebug verification

    def test_del_expand(self):
        # Reducing the size should not expand the buffer (issue #23985)
        b = bytearray(10)
        size = sys.getsizeof(b)
        del b[:1]
        self.assertLessEqual(sys.getsizeof(b), size)

    def test_extended_set_del_slice(self):
        """Extended (stepped) slice set/del matches list semantics."""
        indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300)
        for start in indices:
            for stop in indices:
                # Skip invalid step 0
                for step in indices[1:]:
                    L = list(range(255))
                    b = bytearray(L)
                    # Make sure we have a slice of exactly the right length,
                    # but with different data.
                    data = L[start:stop:step]
                    data.reverse()
                    L[start:stop:step] = data
                    b[start:stop:step] = data
                    self.assertEqual(b, bytearray(L))

                    del L[start:stop:step]
                    del b[start:stop:step]
                    self.assertEqual(b, bytearray(L))

    def test_setslice_trap(self):
        # This test verifies that we correctly handle assigning self
        # to a slice of self (the old Lambert Meertens trap).
        b = bytearray(range(256))
        b[8:] = b
        self.assertEqual(b, bytearray(list(range(8)) + list(range(256))))

    def test_mod(self):
        """%-formatting a bytearray returns a new object, not the original."""
        b = bytearray(b'hello, %b!')
        orig = b
        b = b % b'world'
        self.assertEqual(b, b'hello, world!')
        self.assertEqual(orig, bytearray(b'hello, %b!'))
        self.assertFalse(b is orig)
        b = bytearray(b'%s / 100 = %d%%')
        a = b % (b'seventy-nine', 79)
        self.assertEqual(a, bytearray(b'seventy-nine / 100 = 79%'))

    def test_imod(self):
        """%= on a bytearray also rebinds to a fresh object."""
        b = bytearray(b'hello, %b!')
        orig = b
        b %= b'world'
        self.assertEqual(b, b'hello, world!')
        self.assertEqual(orig, bytearray(b'hello, %b!'))
        self.assertFalse(b is orig)
        b = bytearray(b'%s / 100 = %d%%')
        b %= (b'seventy-nine', 79)
        self.assertEqual(b, bytearray(b'seventy-nine / 100 = 79%'))

    def test_iconcat(self):
        """+= mutates in place (same object); str RHS is rejected."""
        b = bytearray(b"abc")
        b1 = b
        b += b"def"
        self.assertEqual(b, b"abcdef")
        self.assertEqual(b, b1)
        self.assertTrue(b is b1)
        b += b"xyz"
        self.assertEqual(b, b"abcdefxyz")
        try:
            b += ""
        except TypeError:
            pass
        else:
            self.fail("bytes += unicode didn't raise TypeError")

    def test_irepeat(self):
        # *= repeats in place, keeping object identity.
        b = bytearray(b"abc")
        b1 = b
        b *= 3
        self.assertEqual(b, b"abcabcabc")
        self.assertEqual(b, b1)
        self.assertTrue(b is b1)

    def test_irepeat_1char(self):
        # Single-byte repeat takes a memset-style fast path; same identity rules.
        b = bytearray(b"x")
        b1 = b
        b *= 100
        self.assertEqual(b, b"x"*100)
        self.assertEqual(b, b1)
        self.assertTrue(b is b1)

    def test_alloc(self):
        """__alloc__() always exceeds len() (room for the trailing NUL)."""
        b = bytearray()
        alloc = b.__alloc__()
        self.assertTrue(alloc >= 0)
        seq = [alloc]
        for i in range(100):
            b += b"x"
            alloc = b.__alloc__()
            self.assertGreater(alloc, len(b))  # including trailing null byte
            if alloc not in seq:
                seq.append(alloc)

    def test_init_alloc(self):
        """__init__ from a generator grows the buffer incrementally; the
        generator observes the partially-built bytearray after each yield."""
        b = bytearray()
        def g():
            for i in range(1, 100):
                yield i
                a = list(b)
                self.assertEqual(a, list(range(1, len(a)+1)))
                self.assertEqual(len(b), len(a))
                self.assertLessEqual(len(b), i)
                alloc = b.__alloc__()
                self.assertGreater(alloc, len(b))  # including trailing null byte
        b.__init__(g())
        self.assertEqual(list(b), list(range(1, 100)))
        self.assertEqual(len(b), 99)
        alloc = b.__alloc__()
        self.assertGreater(alloc, len(b))

    def test_extend(self):
        """extend() accepts self, plain iterables, iterators and lists;
        out-of-range items leave the bytearray unchanged."""
        orig = b'hello'
        a = bytearray(orig)
        a.extend(a)
        self.assertEqual(a, orig + orig)
        self.assertEqual(a[5:], orig)
        a = bytearray(b'')
        # Test iterators that don't have a __length_hint__
        a.extend(map(int, orig * 25))
        a.extend(int(x) for x in orig * 25)
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        a.extend(iter(map(int, orig * 50)))
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        a.extend(list(map(int, orig * 50)))
        self.assertEqual(a, orig * 50)
        self.assertEqual(a[-5:], orig)
        a = bytearray(b'')
        self.assertRaises(ValueError, a.extend, [0, 1, 2, 256])
        self.assertRaises(ValueError, a.extend, [0, 1, 2, -1])
        self.assertEqual(len(a), 0)
        a = bytearray(b'')
        a.extend([Indexable(ord('a'))])
        self.assertEqual(a, b'a')

    def test_remove(self):
        """remove() deletes the first occurrence; missing/invalid raise."""
        b = bytearray(b'hello')
        b.remove(ord('l'))
        self.assertEqual(b, b'helo')
        b.remove(ord('l'))
        self.assertEqual(b, b'heo')
        self.assertRaises(ValueError, lambda: b.remove(ord('l')))
        self.assertRaises(ValueError, lambda: b.remove(400))
        self.assertRaises(TypeError, lambda: b.remove('e'))
        # remove first and last
        b.remove(ord('o'))
        b.remove(ord('h'))
        self.assertEqual(b, b'e')
        self.assertRaises(TypeError, lambda: b.remove(b'e'))
        b.remove(Indexable(ord('e')))
        self.assertEqual(b, b'')

    def test_pop(self):
        """pop() returns the int byte value; supports indices, raises on empty."""
        b = bytearray(b'world')
        self.assertEqual(b.pop(), ord('d'))
        self.assertEqual(b.pop(0), ord('w'))
        self.assertEqual(b.pop(-2), ord('r'))
        self.assertRaises(IndexError, lambda: b.pop(10))
        self.assertRaises(IndexError, lambda: bytearray().pop())
        # test for issue #6846
        self.assertEqual(bytearray(b'\xff').pop(), 0xff)

    def test_nosort(self):
        # bytearray deliberately has no sort() method.
        self.assertRaises(AttributeError, lambda: bytearray().sort())

    def test_append(self):
        """append() takes an int (or __index__ object) in 0..255, returns None."""
        b = bytearray(b'hell')
        b.append(ord('o'))
        self.assertEqual(b, b'hello')
        self.assertEqual(b.append(100), None)
        b = bytearray()
        b.append(ord('A'))
        self.assertEqual(len(b), 1)
        self.assertRaises(TypeError, lambda: b.append(b'o'))
        b = bytearray()
        b.append(Indexable(ord('A')))
        self.assertEqual(b, b'A')

    def test_insert(self):
        """insert() clamps out-of-range positions like list.insert()."""
        b = bytearray(b'msssspp')
        b.insert(1, ord('i'))
        b.insert(4, ord('i'))
        b.insert(-2, ord('i'))
        b.insert(1000, ord('i'))
        self.assertEqual(b, b'mississippi')
        self.assertRaises(TypeError, lambda: b.insert(0, b'1'))
        b = bytearray()
        b.insert(0, Indexable(ord('A')))
        self.assertEqual(b, b'A')

    def test_copied(self):
        # Issue 4348.  Make sure that operations that don't mutate the array
        # copy the bytes.
        b = bytearray(b'abc')
        self.assertFalse(b is b.replace(b'abc', b'cde', 0))

        t = bytearray([i for i in range(256)])
        x = bytearray(b'')
        self.assertFalse(x is x.translate(t))

    def test_partition_bytearray_doesnt_share_nullstring(self):
        """Empty results of (r)partition must be distinct objects, so
        mutating one cannot leak into the other."""
        a, b, c = bytearray(b"x").partition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        self.assertTrue(b is not c)
        b += b"!"
        self.assertEqual(c, b"")
        a, b, c = bytearray(b"x").partition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        # Same for rpartition
        b, c, a = bytearray(b"x").rpartition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        self.assertTrue(b is not c)
        b += b"!"
        self.assertEqual(c, b"")
        c, b, a = bytearray(b"x").rpartition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")

    def test_resize_forbidden(self):
        # #4509: can't resize a bytearray when there are buffer exports, even
        # if it wouldn't reallocate the underlying buffer.
        # Furthermore, no destructive changes to the buffer may be applied
        # before raising the error.
        b = bytearray(range(10))
        v = memoryview(b)
        def resize(n):
            b[1:-1] = range(n + 1, 2*n - 1)
        resize(10)
        orig = b[:]
        self.assertRaises(BufferError, resize, 11)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, resize, 9)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, resize, 0)
        self.assertEqual(b, orig)
        # Other operations implying resize
        self.assertRaises(BufferError, b.pop, 0)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, b.remove, b[1])
        self.assertEqual(b, orig)
        def delitem():
            del b[1]
        self.assertRaises(BufferError, delitem)
        self.assertEqual(b, orig)
        # deleting a non-contiguous slice
        def delslice():
            b[1:-1:2] = b""
        self.assertRaises(BufferError, delslice)
        self.assertEqual(b, orig)

    @test.support.cpython_only
    def test_obsolete_write_lock(self):
        from _testcapi import getbuffer_with_null_view
        self.assertRaises(BufferError, getbuffer_with_null_view, bytearray())

    def test_iterator_pickling2(self):
        """An unpickled iterator tracks the unpickled (mutated) bytearray."""
        orig = bytearray(b'abc')
        data = list(b'qwerty')
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            # initial iterator
            itorig = iter(orig)
            d = pickle.dumps((itorig, orig), proto)
            it, b = pickle.loads(d)
            b[:] = data
            self.assertEqual(type(it), type(itorig))
            self.assertEqual(list(it), data)

            # running iterator
            next(itorig)
            d = pickle.dumps((itorig, orig), proto)
            it, b = pickle.loads(d)
            b[:] = data
            self.assertEqual(type(it), type(itorig))
            self.assertEqual(list(it), data[1:])

            # empty iterator
            for i in range(1, len(orig)):
                next(itorig)
            d = pickle.dumps((itorig, orig), proto)
            it, b = pickle.loads(d)
            b[:] = data
            self.assertEqual(type(it), type(itorig))
            self.assertEqual(list(it), data[len(orig):])

            # exhausted iterator
            self.assertRaises(StopIteration, next, itorig)
            d = pickle.dumps((itorig, orig), proto)
            it, b = pickle.loads(d)
            b[:] = data
            self.assertEqual(list(it), [])

    # Reuse the generic exhausted-iterator test from the list test suite.
    test_exhausted_iterator = test.list_tests.CommonTest.test_exhausted_iterator
class AssortedBytesTest(unittest.TestCase):
    #
    # Test various combinations of bytes and bytearray
    #

    @check_bytes_warnings
    def test_repr_str(self):
        """repr() and str() agree for bytes and bytearray literals."""
        for f in str, repr:
            self.assertEqual(f(bytearray()), "bytearray(b'')")
            self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')")
            self.assertEqual(f(bytearray([0, 1, 254, 255])),
                             "bytearray(b'\\x00\\x01\\xfe\\xff')")
            self.assertEqual(f(b"abc"), "b'abc'")
            self.assertEqual(f(b"'"), '''b"'"''') # '''
            self.assertEqual(f(b"'\""), r"""b'\'"'""") # '

    def test_compare_bytes_to_bytearray(self):
        """All six rich comparisons work across bytes and bytearray."""
        self.assertEqual(b"abc" == bytes(b"abc"), True)
        self.assertEqual(b"ab" != bytes(b"abc"), True)
        self.assertEqual(b"ab" <= bytes(b"abc"), True)
        self.assertEqual(b"ab" < bytes(b"abc"), True)
        self.assertEqual(b"abc" >= bytes(b"ab"), True)
        self.assertEqual(b"abc" > bytes(b"ab"), True)

        self.assertEqual(b"abc" != bytes(b"abc"), False)
        self.assertEqual(b"ab" == bytes(b"abc"), False)
        self.assertEqual(b"ab" > bytes(b"abc"), False)
        self.assertEqual(b"ab" >= bytes(b"abc"), False)
        self.assertEqual(b"abc" < bytes(b"ab"), False)
        self.assertEqual(b"abc" <= bytes(b"ab"), False)

        self.assertEqual(bytes(b"abc") == b"abc", True)
        self.assertEqual(bytes(b"ab") != b"abc", True)
        self.assertEqual(bytes(b"ab") <= b"abc", True)
        self.assertEqual(bytes(b"ab") < b"abc", True)
        self.assertEqual(bytes(b"abc") >= b"ab", True)
        self.assertEqual(bytes(b"abc") > b"ab", True)

        self.assertEqual(bytes(b"abc") != b"abc", False)
        self.assertEqual(bytes(b"ab") == b"abc", False)
        self.assertEqual(bytes(b"ab") > b"abc", False)
        self.assertEqual(bytes(b"ab") >= b"abc", False)
        self.assertEqual(bytes(b"abc") < b"ab", False)
        self.assertEqual(bytes(b"abc") <= b"ab", False)

    @test.support.requires_docstrings
    def test_doc(self):
        # Both built-in types must carry a docstring that names them.
        self.assertIsNotNone(bytearray.__doc__)
        self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__)
        self.assertIsNotNone(bytes.__doc__)
        self.assertTrue(bytes.__doc__.startswith("bytes("), bytes.__doc__)

    def test_from_bytearray(self):
        # Construction from a memoryview over bytes copies the data.
        sample = bytes(b"Hello world\n\x80\x81\xfe\xff")
        buf = memoryview(sample)
        b = bytearray(buf)
        self.assertEqual(b, bytearray(sample))

    @check_bytes_warnings
    def test_to_str(self):
        """str() of bytes/bytearray yields the repr (with BytesWarning)."""
        self.assertEqual(str(b''), "b''")
        self.assertEqual(str(b'x'), "b'x'")
        self.assertEqual(str(b'\x80'), "b'\\x80'")
        self.assertEqual(str(bytearray(b'')), "bytearray(b'')")
        self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')")
        self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')")

    def test_literal(self):
        """Bytes literals (plain and raw) match their latin-1 encodings;
        non-ASCII characters are a SyntaxError inside a bytes literal."""
        tests = [
            (b"Wonderful spam", "Wonderful spam"),
            (br"Wonderful spam too", "Wonderful spam too"),
            (b"\xaa\x00\000\200", "\xaa\x00\000\200"),
            (br"\xaa\x00\000\200", r"\xaa\x00\000\200"),
        ]
        for b, s in tests:
            self.assertEqual(b, bytearray(s, 'latin-1'))
        for c in range(128, 256):
            self.assertRaises(SyntaxError, eval,
                              'b"%s"' % chr(c))

    def test_translate(self):
        """translate() maps bytes through a 256-byte table and deletes the
        given set; table may be None; it never mutates the source."""
        b = b'hello'
        ba = bytearray(b)
        rosetta = bytearray(range(0, 256))
        rosetta[ord('o')] = ord('e')

        c = b.translate(rosetta, b'l')
        self.assertEqual(b, b'hello')
        self.assertEqual(c, b'hee')
        c = ba.translate(rosetta, b'l')
        self.assertEqual(ba, b'hello')
        self.assertEqual(c, b'hee')

        c = b.translate(None, b'e')
        self.assertEqual(c, b'hllo')
        c = ba.translate(None, b'e')
        self.assertEqual(c, b'hllo')
        self.assertRaises(TypeError, b.translate, None, None)
        self.assertRaises(TypeError, ba.translate, None, None)

    def test_split_bytearray(self):
        # split() accepts any bytes-like separator.
        self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b'])

    def test_rsplit_bytearray(self):
        # rsplit() accepts any bytes-like separator.
        self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b'])

    def test_return_self(self):
        # bytearray.replace must always return a new bytearray
        b = bytearray()
        self.assertFalse(b.replace(b'', b'') is b)

    @unittest.skipUnless(sys.flags.bytes_warning,
                         "BytesWarning is needed for this test: use -bb option")
    def test_compare(self):
        """Under -bb, comparing bytes-likes with str or int warns."""
        def bytes_warning():
            return test.support.check_warnings(('', BytesWarning))
        with bytes_warning():
            b'' == ''
        with bytes_warning():
            '' == b''
        with bytes_warning():
            b'' != ''
        with bytes_warning():
            '' != b''
        with bytes_warning():
            bytearray(b'') == ''
        with bytes_warning():
            '' == bytearray(b'')
        with bytes_warning():
            bytearray(b'') != ''
        with bytes_warning():
            '' != bytearray(b'')
        with bytes_warning():
            b'\0' == 0
        with bytes_warning():
            0 == b'\0'
        with bytes_warning():
            b'\0' != 0
        with bytes_warning():
            0 != b'\0'

    # Optimizations:
    # __iter__? (optimization)
    # __reversed__? (optimization)

    # XXX More string methods?  (Those that don't use character properties)

    # There are tests in string_tests.py that are more
    # comprehensive for things like split, partition, etc.
    # Unfortunately they are all bundled with tests that
    # are not appropriate for bytes

    # I've started porting some of those into bytearray_tests.py, we should port
    # the rest that make sense (the code can be cleaned up to use modern
    # unittest methods at the same time).
class BytearrayPEP3137Test(unittest.TestCase,
                           test.buffer_tests.MixinBytesBufferCommonTests):
    """PEP 3137 checks: bytearray methods must return fresh objects."""

    def marshal(self, x):
        # Hook used by the shared mixin to build the type under test.
        return bytearray(x)

    def test_returns_new_copy(self):
        val = self.marshal(b'1234')
        # On immutable types these MAY return a reference to themselves
        # but on mutable types like bytearray they MUST return a new copy.
        for methname in ('zfill', 'rjust', 'ljust', 'center'):
            method = getattr(val, methname)
            newval = method(3)
            self.assertEqual(val, newval)
            self.assertTrue(val is not newval,
                            methname+' returned self on a mutable object')
        for expr in ('val.split()[0]', 'val.rsplit()[0]',
                     'val.partition(b".")[0]', 'val.rpartition(b".")[2]',
                     'val.splitlines()[0]', 'val.replace(b"", b"")'):
            # eval keeps the failing expression text available for reporting.
            newval = eval(expr)
            self.assertEqual(val, newval)
            self.assertTrue(val is not newval,
                            expr+' returned val on a mutable object')
        sep = self.marshal(b'')
        newval = sep.join([val])
        self.assertEqual(val, newval)
        self.assertIsNot(val, newval)
class FixedStringTest(test.string_tests.BaseTest):
    """Run the shared string tests against UTF-8-encoded bytes fixtures."""

    def fixtype(self, obj):
        # str fixtures from the shared suite are converted to bytes.
        return obj.encode("utf-8") if isinstance(obj, str) else super().fixtype(obj)
class ByteArrayAsStringTest(FixedStringTest, unittest.TestCase):
    # Shared string tests with bytearray as the type under test.
    type2test = bytearray
    contains_bytes = True
class BytesAsStringTest(FixedStringTest, unittest.TestCase):
    # Shared string tests with bytes as the type under test.
    type2test = bytes
    contains_bytes = True
class SubclassTest:
    """Shared checks for user-defined subclasses of bytes/bytearray.

    Concrete test classes must set ``type2test`` (the builtin) and
    ``subclass2test`` (a subclass of it).
    """

    def test_basic(self):
        self.assertTrue(issubclass(self.subclass2test, self.type2test))
        self.assertIsInstance(self.subclass2test(), self.type2test)

        a, b = b"abcd", b"efgh"
        _a, _b = self.subclass2test(a), self.subclass2test(b)

        # test comparison operators with subclass instances
        self.assertTrue(_a == _a)
        self.assertTrue(_a != _b)
        self.assertTrue(_a < _b)
        self.assertTrue(_a <= _b)
        self.assertTrue(_b >= _a)
        self.assertTrue(_b > _a)
        self.assertTrue(_a is not a)

        # test concat of subclass instances
        self.assertEqual(a + b, _a + _b)
        self.assertEqual(a + b, a + _b)
        self.assertEqual(a + b, _a + b)

        # test repeat
        self.assertTrue(a*5 == _a*5)

    def test_join(self):
        # Make sure join returns a NEW object for single item sequences
        # involving a subclass.
        # Make sure that it is of the appropriate type.
        s1 = self.subclass2test(b"abcd")
        s2 = self.type2test().join([s1])
        self.assertTrue(s1 is not s2)
        self.assertTrue(type(s2) is self.type2test, type(s2))

        # Test reverse, calling join on subclass
        s3 = s1.join([b"abcd"])
        self.assertTrue(type(s3) is self.type2test)

    def test_pickle(self):
        """Instances (including extra attributes) must round-trip via pickle."""
        a = self.subclass2test(b"abcd")
        a.x = 10
        a.y = self.subclass2test(b"efgh")
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            b = pickle.loads(pickle.dumps(a, proto))
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            self.assertEqual(a.x, b.x)
            self.assertEqual(a.y, b.y)
            self.assertEqual(type(a), type(b))
            self.assertEqual(type(a.y), type(b.y))

    def test_copy(self):
        """copy.copy/copy.deepcopy must preserve type, value and attributes."""
        a = self.subclass2test(b"abcd")
        a.x = 10
        a.y = self.subclass2test(b"efgh")
        for copy_method in (copy.copy, copy.deepcopy):
            b = copy_method(a)
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            self.assertEqual(a.x, b.x)
            self.assertEqual(a.y, b.y)
            self.assertEqual(type(a), type(b))
            self.assertEqual(type(a.y), type(b.y))
class ByteArraySubclass(bytearray):
    # Minimal bytearray subclass exercised by the subclass tests.
    pass
class BytesSubclass(bytes):
    # Minimal bytes subclass exercised by the subclass tests.
    pass
class OtherBytesSubclass(bytes):
    # A second, distinct bytes subclass (for cross-subclass checks).
    pass
class ByteArraySubclassTest(SubclassTest, unittest.TestCase):
    # Shared subclass checks, parameterized for bytearray.
    type2test = bytearray
    subclass2test = ByteArraySubclass

    def test_init_override(self):
        """bytearray.__init__ may be overridden with a different signature."""
        class subclass(bytearray):
            def __init__(me, newarg=1, *args, **kwargs):
                bytearray.__init__(me, *args, **kwargs)
        x = subclass(4, b"abcd")
        x = subclass(4, source=b"abcd")
        self.assertEqual(x, b"abcd")
        x = subclass(newarg=4, source=b"abcd")
        self.assertEqual(x, b"abcd")
class BytesSubclassTest(SubclassTest, unittest.TestCase):
    # Shared subclass checks, parameterized for bytes.
    type2test = bytes
    subclass2test = BytesSubclass
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 38.933644 | 765 | 0.565423 |
336324e85fb6c3dca86a6ccef49f0a6aa307895f | 349 | py | Python | 18/ex18.py | sdwebster/learn-python-the-hard-way-solutions | 748a3b8dea69a2ff24e69fc6318b0be3da3fc00b | [
"MIT"
] | null | null | null | 18/ex18.py | sdwebster/learn-python-the-hard-way-solutions | 748a3b8dea69a2ff24e69fc6318b0be3da3fc00b | [
"MIT"
] | null | null | null | 18/ex18.py | sdwebster/learn-python-the-hard-way-solutions | 748a3b8dea69a2ff24e69fc6318b0be3da3fc00b | [
"MIT"
def print_two(*args):
    """Print two positional arguments unpacked from *args.

    Fixed: stripped dataset-metadata residue fused onto the def line, and
    parenthesized the print so the line is valid (and identical in output)
    under both Python 2 and Python 3.
    """
    arg1, arg2 = args
    print("arg1: %r, arg2: %r" % (arg1, arg2))
def print_two_again(arg1, arg2):
    """Print two named positional arguments.

    Fixed: parenthesized the print so the line is valid (and identical in
    output) under both Python 2 and Python 3.
    """
    print("arg1: %r, arg2: %r" % (arg1, arg2))
def print_one(arg1):
    """Print a single argument.

    Fixed: parenthesized the print so the line is valid (and identical in
    output) under both Python 2 and Python 3.
    """
    print("arg1: %r" % (arg1))
def print_none():
    """Print a fixed message; takes no arguments.

    Fixed: parenthesized the print so the line is valid (and identical in
    output) under both Python 2 and Python 3.
    """
    print("I got nothing.")
# Demonstrate each argument-passing style defined above.
print_two('Zed', 'Shaw')
print_two_again('Zed', 'Shaw')
print_one('First!')
print_none()
| 18.368421 | 43 | 0.633238 |
d71aa1bc1e1d2d1ca1d6e4dc1195606236173fca | 8,824 | py | Python | kauffman/data/helpers/qwi_helpers.py | KAstev/downwardata | cf57c206e10a0668970b51e2e23110a0ca1af0df | [
"MIT"
] | null | null | null | kauffman/data/helpers/qwi_helpers.py | KAstev/downwardata | cf57c206e10a0668970b51e2e23110a0ca1af0df | [
"MIT"
] | null | null | null | kauffman/data/helpers/qwi_helpers.py | KAstev/downwardata | cf57c206e10a0668970b51e2e23110a0ca1af0df | [
"MIT"
] | null | null | null | import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import os
import time
import requests
import pandas as pd
from itertools import product
from kauffman import constants as c
from webdriver_manager.chrome import ChromeDriverManager
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
pd.set_option('max_columns', 1000)
pd.set_option('max_info_columns', 1000)
pd.set_option('expand_frame_repr', False)
pd.set_option('display.max_rows', 30000)
pd.set_option('max_colwidth', 4000)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
def _region_year_lst(obs_level, state_list):
# years = list(range(max(start_year, 2000), min(end_year, 2019) + 1))
# todo: make this programmatic
years = list(range(2000, 2021))
if obs_level in ['state', 'county']:
return list(product(state_list, years))
elif obs_level == 'msa':
msa_dic_items = c.msa_fips_state_fips_dic.items()
msa_states = [(k, s) for k, states in msa_dic_items for s in states if s in state_list]
return list(product(msa_states, years))
def _build_strata_url(strata):
url_section = ''
if 'firmage' in strata:
for f in range(0,6):
url_section = url_section + f'&firmage={f}'
if 'firmsize' in strata:
for f in range(0,6):
url_section = url_section + f'&firmsize={f}'
if 'sex' in strata:
url_section = url_section + '&sex=0&sex=1&sex=2'
if 'industry' in strata:
for i in [11, 21, 22, 23, 42, 51, 52, 53, 54, 55, 56, 61, 62, 71, 72, 81, 92]:
url_section = url_section + f'&industry={i}'
return url_section
def _build_url(fips, year, region, bds_key, firm_strat):
    """Assemble one Census QWI API request URL.

    Args:
        fips: state fips (for 'state'/'county'), or an
            (msa fips, state fips) tuple (for 'msa').
        year: value for the &time= filter.
        region: 'state', 'county', or 'msa'.
        bds_key: Census API key.
        firm_strat: strata names, expanded by _build_strata_url.
    """
    base_url = 'https://api.census.gov/data/timeseries/qwi/sa?'
    # Full indicator list pulled in a single request.
    var_list = 'Emp,EmpEnd,EmpS,EmpTotal,EmpSpv,HirA,HirN,HirR,Sep,HirAEnd,SepBeg,HirAEndRepl,' + \
        'HirAEndR,SepBegR,HirAEndReplr,HirAs,HirNs,SepS,SepSnx,TurnOvrS,FrmJbGn,FrmJbLs,FrmJbC,' + \
        'FrmJbGnS,FrmJbLsS,FrmJbCS,EarnS,EarnBeg,EarnHirAS,EarnHirNS,EarnSepS,Payroll'
    strata_section = _build_strata_url(firm_strat)

    if region == 'msa':
        # for_region = 'for=metropolitan%20statistical%20area/micropolitan%20statistical%20area:*&in=state:{0}'.format(state)
        for_region = f'for=metropolitan%20statistical%20area/micropolitan%20statistical%20area:{fips[0]}&in=state:{fips[1]}'
    elif region == 'county':
        for_region = f'for=county:*&in=state:{fips}'
    else:
        for_region = f'for=state:{fips}'

    # ownercode A05 = all private ownership.  NOTE(review): assumed from the
    # QWI docs — confirm against the Census ownercode code list.
    return '{0}get={1}&{2}&time={3}&ownercode=A05{4}&key={5}'. \
        format(base_url, var_list, for_region, year, strata_section, bds_key)
def _build_df_header(df):
df.columns = df.iloc[0]
return df[1:]
def _fetch_from_url(url):
    """GET *url* and return its JSON payload as a header-promoted DataFrame.

    Returns an empty DataFrame when the response cannot be parsed, so one
    failed query does not abort the pd.concat in the callers.
    """
    r = requests.get(url)
    try:
        df = pd.DataFrame(r.json()).pipe(_build_df_header)
        print('Success', end=' ')
        # return pd.DataFrame(r.json()).pipe(lambda x: x.rename(columns=dict(zip(x.columns, x.iloc[0]))))[1:]
        # essentially the same as above; the rename function does not, apparently, give access to df
    # NOTE(review): bare except keeps the scrape best-effort, but it also
    # swallows unrelated errors — consider narrowing (e.g. ValueError).
    except:
        print('Fail', r, url)
        df = pd.DataFrame()
    return df
def _county_msa_state_fetch_data(obs_level, state_list, strata):
    """Query the QWI API once per region/year pair and stack the results."""
    print('\tQuerying the Census QWI API...')
    return pd.concat(
        [
            _fetch_from_url(
                _build_url(syq[0], syq[1], obs_level, os.getenv('BDS_KEY'), strata),
            )
            # syq = (fips, year) for state/county; ((msa, state), year) for msa.
            for syq in _region_year_lst(obs_level, state_list) #[-40:]
        ]
    )
def _us_fetch_data_all(private, strat):
    """Scrape national QWI data from the LED Extraction Tool with Selenium.

    National aggregates are not available from the API path used above, so
    this walks the https://ledextract.ces.census.gov UI tab by tab
    (Geography -> Firm Characteristics -> Worker Characteristics ->
    Indicators -> Quarters -> Export) and downloads the resulting CSV.

    Args:
        private: if True, select the private-ownership radio button.
        strat: stratum names ('firmage'/'firmsize', 'sex', 'industry')
            deciding which checkboxes to tick.

    NOTE(review): the dijit widget ids and sleep lengths are tied to the
    current page DOM/latency — confirm they still match if the site changes.
    """
    # print('\tFiring up selenium extractor...')
    pause1 = 1
    pause2 = 3

    chrome_options = Options()
    # chrome_options.add_argument('--headless')
    driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)
    driver.get('https://ledextract.ces.census.gov/static/data.html')

    # Geography
    # print('\tGeography tab...')
    time.sleep(pause1)
    driver.find_element_by_id('continue_with_selection_label').click()

    # Firm Characteristics
    # print('\tFirm Characteristics tab...')
    if private:
        driver.find_element_by_id('dijit_form_RadioButton_4').click()
    if any(x in ['firmage', 'firmsize'] for x in strat):
        # Checkboxes 0-5 cover the six firmage/firmsize categories.
        for box in range(0, 6):
            driver.find_element_by_id('dijit_form_CheckBox_{}'.format(box)).click()
            # time.sleep(pause1)
    # time.sleep(pause1)
    if 'industry' in strat:
        elems = driver.find_elements_by_xpath("//a[@href]")[12]
        driver.execute_script("arguments[0].click();", elems)
    driver.find_element_by_id('continue_to_worker_char').click()

    # Worker Characteristics
    # print('\tWorker Characteristics tab...')
    if 'sex' in strat:
        # driver.find_element_by_id('dijit_form_CheckBox_12').click()
        driver.find_element_by_id('dijit_form_CheckBox_13').click()
        driver.find_element_by_id('dijit_form_CheckBox_14').click()
    driver.find_element_by_id('continue_to_indicators').click()

    # Indicators
    # print('\tIndicators tab...')
    # Expand the three collapsed indicator groups, then tick every indicator.
    for _ in range(0, 3):
        driver.find_element_by_class_name('ClosedGroup').click()
        time.sleep(pause2)
    for box in range(19, 50):
        driver.find_element_by_id('dijit_form_CheckBox_{}'.format(box)).click()
        # time.sleep(pause1)
    driver.find_element_by_id('continue_to_quarters').click()

    # Quarters
    # print('\tQuarters tab...')
    for quarter in range(1, 5):
        driver.find_element_by_xpath('//*[@title="Check All Q{}"]'.format(quarter)).click()
        # time.sleep(pause1)
    driver.find_element_by_id('continue_to_export').click()

    # Summary and Export: submit, wait for the CSV link, and read it.
    time.sleep(pause2)
    driver.find_element_by_id('submit_request').click()
    try:
        element = WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.LINK_TEXT, 'CSV')))
    finally:
        href = driver.find_element_by_link_text('CSV').get_attribute('href')
        return pd.read_csv(href)
def _annualizer(df, annualize, covars):
    """Collapse quarterly QWI observations to annual ones.

    Args:
        df: quarterly dataframe with a 'time' column formatted 'YYYY-Qq'.
        annualize: falsy -> return df unchanged; 'March' -> Q1 is assigned
            to the preceding year (April-March years); any other truthy
            value -> calendar years.
        covars: grouping columns ('time', 'fips', 'ownercode' + strata).

    Returns:
        The annualized dataframe.  Groups without exactly four quarters are
        dropped, and the sum uses skipna=False so any missing quarter makes
        the annual value NaN rather than a partial-year sum.
    """
    if not annualize:
        return df
    elif annualize == 'March':
        # Re-label Q1 rows to the previous year, then drop the helper column.
        df = df.\
            assign(
                quarter=lambda x: x['time'].str[-1:],
                time=lambda x: x.apply(lambda y: int(y['time'][:4]) - 1 if y['quarter'] == '1' else int(y['time'][:4]), axis=1)
            ).\
            astype({'time': 'str'}).\
            drop('quarter', 1)
    else:
        # Calendar-year label: keep only the YYYY prefix.
        df = df. \
            assign(
                time=lambda x: x['time'].str[:4],
            )

    return df. \
        assign(
            row_count=lambda x: x['fips'].groupby([x[var] for var in covars]).transform('count')
        ). \
        query('row_count == 4'). \
        drop(columns=['row_count']). \
        groupby(covars).apply(lambda x: pd.DataFrame.sum(x.set_index(covars), skipna=False)).\
        reset_index(drop=False)
def _qwi_data_create(indicator_lst, region, state_list, private, annualize, strata):
    """Fetch, tidy, and optionally annualize QWI data for one region level.

    Args:
        indicator_lst: QWI indicator column names to keep.
        region: 'state', 'county', 'msa', or 'us'.
        state_list: state fips codes to query (ignored for 'us').
        private: forwarded to the national scraper (private ownership only).
        annualize: False, 'March', or truthy — see _annualizer.
        strata: extra grouping columns, e.g. ['firmage'].

    Returns:
        DataFrame with columns time/fips/ownercode + strata + indicators.
    """
    # todo: need to sort out the by_age, by_size, private, and strata keywords
    covars = ['time', 'fips', 'ownercode'] + strata

    if region == 'state':
        df = _county_msa_state_fetch_data(region, state_list, strata). \
            astype({'state': 'str'}). \
            rename(columns={'state': 'fips'})
    elif region == 'county':
        # County fips = state fips prefix + county fips suffix.
        df = _county_msa_state_fetch_data(region, state_list, strata). \
            assign(fips=lambda x: x['state'].astype(str) + x['county'].astype(str)). \
            drop(['state', 'county'], 1)
    elif region == 'msa':
        df = _county_msa_state_fetch_data(region, state_list, strata). \
            rename(columns={'metropolitan statistical area/micropolitan statistical area': 'fips'}). \
            drop('state', 1)
    elif region == 'us':
        # National data comes from the Selenium scraper in a different shape.
        df = _us_fetch_data_all(private, strata). \
            assign(
                time=lambda x: x['year'].astype(str) + '-Q' + x['quarter'].astype(str),
                fips='00'
            ). \
            rename(columns={'geography': 'region', 'HirAS': 'HirAs', 'HirNS': 'HirNs'})  # \

    return df. \
        apply(pd.to_numeric, errors='ignore'). \
        pipe(_annualizer, annualize, covars).\
        sort_values(covars).\
        reset_index(drop=True) \
        [covars + indicator_lst]

    # todo: maybe put the msa combiner in the msa block above
    # return df. \
    #     reset_index(drop=True). \
    #     astype(dict(zip(indicator_lst, ['float'] * len(indicator_lst)))). \
    #     pipe(_msa_combiner if region == 'msa' else lambda x: x). \
    #     pipe(_annualizer, annualize, strata)
72a137c77783b38cb077d0de32c4aa655ee2bb1f | 437 | py | Python | code/exampleStrats/grimEvery2.py | MissingAssignments/PrisonersDilemmaTournament | a62c5c6df51977eb361b67e7f630570996eb1661 | [
"MIT"
] | null | null | null | code/exampleStrats/grimEvery2.py | MissingAssignments/PrisonersDilemmaTournament | a62c5c6df51977eb361b67e7f630570996eb1661 | [
"MIT"
] | null | null | null | code/exampleStrats/grimEvery2.py | MissingAssignments/PrisonersDilemmaTournament | a62c5c6df51977eb361b67e7f630570996eb1661 | [
"MIT"
def strategy(history, memory):
    """Grim-every-2 prisoner's-dilemma strategy.

    Fixed: stripped dataset-metadata residue fused onto the def line; the
    logic itself is unchanged and documented below.

    Args:
        history: 2-row array (row 0 = our moves, row 1 = opponent's moves;
            1 = cooperate, 0 = defect) — assumes a numpy-style .shape and
            2-d indexing.
        memory: None on the first turn, else the (flag, count) tuple
            returned last turn.

    Returns:
        (move, new_memory) where move is 1 (cooperate) or 0 (defect).

    NOTE(review): both return statements compute memory[0]+1 — adding 1 to
    the *boolean* flag — while the parity test reads memory[1]; the counter
    therefore never advances past 1/2.  Likely intended memory[1]+1; confirm
    against the tournament's other example strategies before changing, as a
    fix alters tournament behavior.
    NOTE(review): the cooperate branch stores (False, ...), so the "grim"
    wronged state is not remembered in memory — it is re-derived from the
    opponent's last move only; confirm this is intended.
    """
    wronged = False
    if not memory:
        memory = (False, 0)
    if memory[0] is not None and memory[0]:  # Has memory that it was already wronged.
        wronged = True
    else:  # Has not been wronged yet, historically.
        if history.shape[1] >= 1 and history[1,-1] == 0:  # Just got wronged.
            wronged = True
    if wronged:
        if not memory[1] % 2 == 0:  # Defect on odd counts only.
            return 0, (True, memory[0]+1)
    return 1, (False, memory[0]+1)
0f6244e366414b57a1c8f3e3369321ca5e33d618 | 21,424 | py | Python | tensor2tensor/models/image_transformer.py | cshanbo/tensor2tensor | 346a27b10fd5750e171f26290766e7f71c3bfcb5 | [
"Apache-2.0"
] | 5 | 2019-03-28T03:52:32.000Z | 2021-02-24T07:09:26.000Z | tensor2tensor/models/image_transformer.py | cshanbo/tensor2tensor | 346a27b10fd5750e171f26290766e7f71c3bfcb5 | [
"Apache-2.0"
] | null | null | null | tensor2tensor/models/image_transformer.py | cshanbo/tensor2tensor | 346a27b10fd5750e171f26290766e7f71c3bfcb5 | [
"Apache-2.0"
] | 2 | 2018-08-07T03:43:09.000Z | 2019-12-09T06:41:40.000Z | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""image generation with transformer (attention).
encoder: [Self-Attention, Feed-forward] x n
decoder: [Self-Attention, Source-Target-Attention, Feed-forward] x n
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
# Dependency imports
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_image_attention as cia
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
@registry.register_model
class Imagetransformer(t2t_model.T2TModel):
  """Conditional image generation with attention. See file docstring."""

  def body(self, features):
    """Autoregressive decoder over image pixels.

    Args:
      features: dict with "targets" (the image being modeled) and "inputs"
        (used as a class-label embedding when hparams.unconditional is off).

    Returns:
      Decoder output reshaped into image logits via cia.create_output.
    """
    hparams = copy.copy(self._hparams)
    inputs = features["inputs"]
    targets = features["targets"]
    # Log a sample target image, but not when reusing variables or decoding.
    if not (tf.get_variable_scope().reuse or
            hparams.mode == tf.contrib.learn.ModeKeys.INFER):
      tf.summary.image("targets", targets, max_outputs=1)

    # Prepare decoder inputs and bias.
    decoder_input, rows, cols = cia.prepare_decoder(targets, hparams)
    # Add class label to decoder input (broadcast over all positions).
    if not hparams.unconditional:
      decoder_input += tf.reshape(
          inputs,
          [common_layers.shape_list(targets)[0], 1, 1, hparams.hidden_size])
    decoder_output = cia.transformer_decoder_layers(
        decoder_input,
        None,
        hparams.num_decoder_layers or hparams.num_hidden_layers,
        hparams,
        attention_type=hparams.dec_attention_type,
        name="decoder")
    output = cia.create_output(decoder_output, rows, cols, targets, hparams)
    return output
@registry.register_model
class ImagetransformerMoe(t2t_model.T2TModel):
  """Conditional image generation with attention and MoE."""

  @property
  def use_body_sharded(self):
    # Tell T2TModel to call body_sharded with per-shard feature lists.
    return True

  def body_sharded(self, sharded_features):
    """Sharded decoder body with mixture-of-experts layers.

    Args:
      sharded_features: dict of per-shard feature lists with "inputs" and
        "targets".

    Returns:
      (sharded decoder output, extra MoE load-balancing loss).
    """
    dp = self._data_parallelism
    hparams = copy.copy(self._hparams)
    inputs = sharded_features["inputs"]
    targets = sharded_features["targets"]

    # Determine attention type and padding from hparams.
    q_padding, kv_padding = "VALID", "VALID"
    if hparams.q_filter_width > 1:
      q_padding = "LEFT"
    if hparams.kv_filter_width > 1:
      kv_padding = "LEFT"

    # Prepare decoder inputs and bias.
    decoder_input, rows, cols = dp(cia.prepare_decoder_inputs,
                                   inputs, targets, hparams)

    # Run decoder.
    decoder_output, extra_loss = cia.transformer_layers_sharded(
        dp,
        self._ps_devices,
        decoder_input,
        hparams.num_hidden_layers,
        hparams,
        self_attention_bias=None,
        enc_output=None,
        attention_type=hparams.dec_attention_type,
        q_padding=q_padding,
        kv_padding=kv_padding,
        name="decoder")

    output = dp(cia.create_output, decoder_output, rows, cols, targets, hparams)
    return output, extra_loss
@registry.register_hparams
def image_transformer_base():
  """Base hyperparameter set shared by all image-transformer variants."""
  hparams = common_hparams.basic_params1()
  hparams.hidden_size = 512
  hparams.batch_size = 1
  hparams.max_length = 3075
  hparams.dropout = 0.0
  hparams.clip_grad_norm = 0.  # i.e. no gradient clipping
  hparams.optimizer_adam_epsilon = 1e-9
  hparams.learning_rate_decay_scheme = "noam"
  hparams.learning_rate = 0.1
  hparams.learning_rate_warmup_steps = 4000
  hparams.initializer_gain = 0.2
  hparams.num_hidden_layers = 6
  hparams.initializer = "uniform_unit_scaling"
  hparams.weight_decay = 0.0
  hparams.optimizer_adam_beta1 = 0.9
  hparams.optimizer_adam_beta2 = 0.98
  hparams.label_smoothing = 0.0
  hparams.target_modality = "image:identity"
  hparams.norm_type = "layer"
  hparams.layer_prepostprocess_dropout = 0.0
  hparams.add_hparam("filter_size", 512)  # Add new ones like this.

  # attention-related flags
  hparams.add_hparam("num_heads", 8)
  hparams.add_hparam("attention_key_channels", 0)
  hparams.add_hparam("attention_value_channels", 0)
  hparams.add_hparam("ffn_layer", "conv_hidden_relu")
  # All hyperparameters ending in "dropout" are automatically set to 0.0
  # when not in training mode.
  hparams.add_hparam("attention_dropout", 0.0)
  hparams.add_hparam("relu_dropout", 0.0)
  hparams.add_hparam("pos", "timing")  # timing, none
  hparams.add_hparam("nbr_decoder_problems", 1)
  hparams.add_hparam("num_output_layers", 3)
  hparams.add_hparam("block_size", 1)

  # dilated attention based flags
  hparams.add_hparam("gap_sizes", [2, 4, 8, 16, 32, 64, 2, 4, 8, 16, 32, 64])

  # image size related flags
  # assuming that the image has same height and width
  hparams.add_hparam("img_len", 32)
  hparams.add_hparam("num_channels", 3)
  # Local attention params
  hparams.add_hparam("local_and_global_att", False)
  hparams.add_hparam("block_length", 256)
  hparams.add_hparam("block_width", 128)
  hparams.add_hparam("num_encoder_layers", 4)
  hparams.add_hparam("num_decoder_layers", 12)
  # NOTE(review): sep_rgb_embed is set by assignment, not add_hparam —
  # presumably already defined in basic_params1; confirm.
  hparams.sep_rgb_embed = False
  hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_1D)
  hparams.add_hparam("block_rastor_scan", False)

  # multipos attention params
  hparams.add_hparam("q_filter_width", 1)
  hparams.add_hparam("kv_filter_width", 1)

  hparams.add_hparam("unconditional", False)  # unconditional generation
  return hparams
@registry.register_hparams
def imagetransformer_base():
  """Registered alias for the shared image-transformer base hparams."""
  return image_transformer_base()
@registry.register_hparams
def imagetransformer_sep_channels():
  """separate rgb embeddings."""
  hparams = imagetransformer_base()
  hparams.num_heads = 4
  hparams.attention_key_channels = hparams.attention_value_channels = 0
  # Half-size model relative to the base.
  hparams.hidden_size = 256
  hparams.filter_size = 512
  hparams.num_hidden_layers = 6
  return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l():
  """separate rgb embeddings, 8 layers, random sampling."""
  hparams = imagetransformer_base()
  hparams.num_heads = 4
  hparams.attention_key_channels = hparams.attention_value_channels = 0
  hparams.hidden_size = 256
  hparams.filter_size = 256
  hparams.num_hidden_layers = 8
  hparams.sampling_method = "random"
  return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_multipos3():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l()
hparams.q_filter_width = 3
hparams.kv_filter_width = 3
return hparams
@registry.register_hparams
def imagetransformer_sep_output_channels_8l():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l()
hparams.sep_rgb_embed = True
hparams.sampling_method = "random"
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan():
"""big 1d model for conditional image generation.2.99 on cifar10."""
hparams = imagetransformer_sep_channels_8l()
hparams.block_width = 256
hparams.block_length = 256
hparams.hidden_size = 512
hparams.num_heads = 8
hparams.filter_size = 2048
hparams.batch_size = 4
hparams.max_length = 3075
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.num_decoder_layers = 8
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_base_10l_8h_big_uncond_dr03_dan_64():
"""big 1d model for unconditional generation on imagenet."""
hparams = imagetransformer_base_10l_8h_big_cond_dr03_dan()
hparams.unconditional = True
hparams.max_length = 14000
hparams.batch_size = 1
hparams.img_len = 64
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_128():
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan()
hparams.block_width = 128
hparams.block_length = 128
return hparams
@registry.register_hparams
def imagetransformer_base_10l_8h_big_cond_dr03_dan():
"""Best conditional Cifar10 gen param."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan()
hparams.num_decoder_layers = 10
return hparams
@registry.register_hparams
def imagetransformer_base_10l_8h_big_uncond_dr03_dan():
"""Best unconditional Cifar10 gen param."""
hparams = imagetransformer_base_10l_8h_big_cond_dr03_dan()
hparams.num_decoder_layers = 10
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan()
hparams.gap_sizes = [0, 16, 64, 0, 16, 64, 128, 0]
hparams.dec_attention_type = cia.AttentionType.DILATED
hparams.block_length = 128
hparams.block_width = 128
hparams.add_hparam("num_memory_blocks", 1)
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_b():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated()
hparams.block_width = 64
hparams.num_memory_blocks = 2
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_c():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated()
hparams.block_width = 32
hparams.num_memory_blocks = 4
return hparams
@registry.register_hparams
def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_d():
"""Dilated hparams."""
hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated()
hparams.gap_sizes = [0, 16, 64, 16, 64, 128, 256, 0]
return hparams
@registry.register_hparams
def imagetransformer_base_12l_8h_big():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.filter_size = 1024
hparams.num_decoder_layers = 12
hparams.batch_size = 1
hparams.hidden_size = 512
hparams.learning_rate_warmup_steps = 4000
hparams.sampling_method = "random"
hparams.beam_size = 1
hparams.block_width = 256
return hparams
@registry.register_hparams
def imagetransformer1d_base_8l_64by64():
"""hparams fo 12 layer big 1d model for imagenet 64x64."""
hparams = image_transformer_base()
hparams.num_heads = 8
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.num_decoder_layers = 8
hparams.batch_size = 1
hparams.block_length = 512
hparams.block_width = 768
hparams.layer_prepostprocess_dropout = 0.1
hparams.max_length = 14000
hparams.unconditional = int(False)
return hparams
@registry.register_hparams
def imagetransformer1d_base_12l_64by64():
"""hparams fo 12 layer big 1d model for imagenet 64x64."""
hparams = image_transformer_base()
hparams.num_heads = 8
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.num_decoder_layers = 12
hparams.batch_size = 1
hparams.block_length = 512
hparams.block_width = 768
hparams.layer_prepostprocess_dropout = 0.1
hparams.max_length = 14000
hparams.unconditional = int(False)
return hparams
@registry.register_hparams
def imagetransformer_base_14l_8h_big():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_12l_8h_big()
hparams.num_decoder_layers = 14
return hparams
@registry.register_hparams
def imagetransformer_base_14l_8h_big_dr01():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big()
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_base_12l_8h_big_uncond():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_12l_8h_big()
hparams.unconditional = True
return hparams
@registry.register_hparams
def imagetransformer_base_14l_8h_big_uncond():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_12l_8h_big_uncond()
hparams.num_decoder_layers = 14
return hparams
@registry.register_hparams
def imagetransformer_base_14l_8h_big_uncond_dr01():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_uncond()
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_12l_16h_imagenet_large():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_hidden_layers = 12
hparams.batch_size = 1
hparams.filter_size = 2048
hparams.num_heads = 16
hparams.learning_rate_warmup_steps = 16000
hparams.sampling_method = "random"
hparams.learning_rate = 0.1
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
hparams.num_hidden_layers = 16
hparams.local_attention = True
hparams.batch_size = 1
hparams.block_length = 256
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc_128():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_12l_16h_imagenet_large()
hparams.num_hidden_layers = 16
hparams.local_attention = True
hparams.batch_size = 1
hparams.block_length = 128
return hparams
@registry.register_hparams
def imagetransformer_sep_output_channels_8l_local_and_global_att():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l()
hparams.sep_rgb_embed = True
hparams.sampling_method = "random"
hparams.local_and_global_att = True
return hparams
@registry.register_hparams
def imagetransformer_base_10l_16h_big_uncond_dr01_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_uncond_dr01()
# num_hidden_layers
hparams.num_decoder_layers = 10
hparams.num_heads = 16
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.batch_size = 1
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_base_10l_16h_big_dr01_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_uncond_dr01()
# num_hidden_layers
hparams.num_decoder_layers = 10
hparams.num_heads = 16
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.batch_size = 1
hparams.unconditional = False
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_8h():
"""separate rgb embeddings."""
hparams = imagetransformer_base()
hparams.num_heads = 8
hparams.batch_size = 1
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 512
hparams.filter_size = 512
hparams.num_hidden_layers = 8
hparams.sampling_method = "random"
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_10l_8h():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_hidden_layers = 8
hparams.learning_rate_warmup_steps = 16000
hparams.sampling_method = "random"
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_12l_8h():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_hidden_layers = 12
hparams.batch_size = 2
hparams.learning_rate_warmup_steps = 16000
hparams.sampling_method = "random"
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_12l_8h_nda():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_hidden_layers = 12
hparams.batch_size = 2
hparams.learning_rate_warmup_steps = 16000
hparams.sampling_method = "random"
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_12l_8h_4k():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_hidden_layers = 12
hparams.batch_size = 2
hparams.learning_rate_warmup_steps = 4000
hparams.sampling_method = "random"
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_12l_8h_sep_rgb():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_hidden_layers = 12
hparams.batch_size = 2
hparams.learning_rate_warmup_steps = 16000
hparams.sep_rgb_embed = True
hparams.sampling_method = "random"
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_8h_local_and_global_att():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_heads = 8
hparams.batch_size = 1
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.hidden_size = 256
hparams.filter_size = 256
hparams.num_hidden_layers = 4
hparams.sampling_method = "random"
hparams.local_and_global_att = True
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_self_att_ffn():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l()
hparams.num_parts = 4
hparams.ffn_layer = "self_attention_ffn"
hparams.share_kv = True
return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_glu_ffn():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l()
hparams.ffn_layer = "glu_ffn"
return hparams
@registry.register_hparams
def imagetransformer_bas8l_8h_big_uncond_dr03_imgnet():
"""big 1d model for conditional image generation."""
hparams = imagetransformer_base_14l_8h_big_uncond_dr01()
# num_hidden_layers
hparams.num_decoder_layers = 8
hparams.num_heads = 8
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.layer_prepostprocess_dropout = 0.3
return hparams
@registry.register_hparams
def imagetransformer_tiny():
hparams = imagetransformer_base()
hparams.num_hidden_layers = 2
hparams.hidden_size = 64
hparams.batch_size = 1
return hparams
@registry.register_hparams
def imagetransformer_tiny_tpu():
hparams = imagetransformer_base()
hparams.num_hidden_layers = 2
hparams.hidden_size = 16
hparams.batch_size = 2
hparams.num_heads = 2
return hparams
@registry.register_hparams
def imagetransformer_base_10l_16h_big_dr01_moe_imgnet():
  """Mixture-of-experts variant of the 10-layer, 16-head big ImageNet model."""
  hparams = imagetransformer_base_10l_16h_big_dr01_imgnet()
  hparams.initializer = "orthogonal"
  hparams.learning_rate_warmup_steps = 16000
  hparams.add_hparam("moe_layers_decoder", "2,7")  # Which layer is MoE.
  hparams.moe_hidden_sizes = "4096"  # Hidden layer sizes (comma-separated).
  hparams.moe_num_experts = 64  # Number of experts in each MoE layer.
  hparams.moe_k = 4  # How many experts to use per batch element (try 2 or 4).
  hparams.moe_loss_coef = 3e-2  # MoE loss coefficient (1e-2 is usually ok).
  hparams.scheduled_sampling_prob = 0.1
  hparams.scheduled_sampling_warmup_steps = 200000
  return hparams
@registry.register_hparams
def imagetransformer_moe_tiny():
  """Set of hyperparameters for a very small imagetransformer with MoE."""
  hparams = imagetransformer_tiny()
  hparams.hidden_size = 64
  hparams.batch_size = 1
  hparams.num_hidden_layers = 3
  # Decoder uses the local-1D mixture-of-experts attention variant.
  hparams.dec_attention_type = cia.AttentionType.MOE_LOCAL_1D
  hparams.add_hparam("moe_layers_decoder", "1")  # Which layer is MoE.
  hparams.moe_hidden_sizes = "1024"  # Hidden layer sizes (comma-separated).
  hparams.moe_num_experts = 16  # Number of experts in each MoE layer.
  hparams.moe_k = 2  # How many experts to use per batch element (try 2 or 4).
  hparams.moe_loss_coef = 1e-2  # MoE loss coefficient (1e-2 is usually ok).
  return hparams
def update_hparams_for_tpu(hparams):
  """Mutate ``hparams`` in place with TPU-friendly settings."""
  hparams.batch_size = 4
  hparams.optimizer = "TrueAdam"
  # The pad remover relies on the `where` op, which is not supported.
  hparams.use_pad_remover = False
@registry.register_hparams
def imagetransformer_base_tpu():
  """Base imagetransformer configuration sized to fit on TPU."""
  hparams = imagetransformer_base()
  update_hparams_for_tpu(hparams)
  hparams.batch_size = 4
  hparams.num_heads = 4   # heads are expensive on tpu
  hparams.hidden_size = 256
  hparams.filter_size = 512
  hparams.num_hidden_layers = 8
  hparams.sampling_method = "random"
  return hparams
@registry.register_hparams
def imagetransformer_sep_channels_8l_tpu():
  """Hparams for training imagetransformer on tpu."""
  hparams = imagetransformer_sep_channels_8l()
  update_hparams_for_tpu(hparams)
  hparams.batch_size = 4
  hparams.num_heads = 4   # heads are expensive on tpu
  # Use separate weight matrices for the input embedding and the softmax.
  hparams.shared_embedding_and_softmax_weights = False
  return hparams
@registry.register_hparams
def imagetransformer_bas8l_8h_big_uncond_dr03_imgnet_tpu():
  """TPU version of the 8-layer, 8-head big dr0.3 ImageNet model."""
  hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
  update_hparams_for_tpu(hparams)
  hparams.batch_size = 1
  hparams.num_heads = 8   # heads are expensive on tpu
  return hparams
| 31.139535 | 80 | 0.776326 |
77e69e1f10cb0322b8e7e7addde203b292596e77 | 2,134 | py | Python | openpyxlzip/chart/legend.py | ankitJoshi03/openpyxlzip | f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647 | [
"MIT"
] | null | null | null | openpyxlzip/chart/legend.py | ankitJoshi03/openpyxlzip | f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647 | [
"MIT"
] | null | null | null | openpyxlzip/chart/legend.py | ankitJoshi03/openpyxlzip | f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647 | [
"MIT"
] | null | null | null | # Copyright (c) 2010-2020 openpyxlzip
from openpyxlzip.descriptors.serialisable import Serialisable
from openpyxlzip.descriptors import (
Typed,
Integer,
Alias,
Sequence,
)
from openpyxlzip.descriptors.excel import ExtensionList
from openpyxlzip.descriptors.nested import (
NestedBool,
NestedSet,
NestedInteger
)
from .layout import Layout
from .shapes import GraphicalProperties
from .text import RichText
class LegendEntry(Serialisable):
    """A single legendEntry element: per-entry overrides for a chart legend."""

    tagname = "legendEntry"

    # Zero-based index of the legend entry this override applies to.
    idx = NestedInteger()
    # Presumably flags the entry at ``idx`` for removal from the legend.
    delete = NestedBool()
    # Optional rich-text formatting for the entry's label.
    txPr = Typed(expected_type=RichText, allow_none=True)
    # Extension list for schema forward-compatibility.
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    # Serialisation order of child elements.
    __elements__ = ('idx', 'delete', 'txPr', 'extLst',)

    def __init__(self,
                 idx=0,
                 delete=False,
                 txPr=None,
                 extLst=None,
                 ):
        self.idx = idx
        self.delete = delete
        self.txPr = txPr
        self.extLst = extLst
class Legend(Serialisable):
    """The chart legend element: position, per-entry overrides and styling."""

    tagname = "legend"

    # Legend placement: bottom, top-right, left, right or top.
    legendPos = NestedSet(values=(['b', 'tr', 'l', 'r', 't']))
    position = Alias('legendPos')
    # Per-entry overrides (see LegendEntry above in this module).
    legendEntry = Sequence(expected_type=LegendEntry)
    # Optional manual layout of the legend area.
    layout = Typed(expected_type=Layout, allow_none=True)
    # Whether the legend may overlap the plot area.
    overlay = NestedBool(allow_none=True)
    spPr = Typed(expected_type=GraphicalProperties, allow_none=True)
    graphicalProperties = Alias('spPr')
    txPr = Typed(expected_type=RichText, allow_none=True)
    textProperties = Alias('txPr')
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    # Serialisation order of child elements.
    __elements__ = ('legendPos', 'legendEntry', 'layout', 'overlay', 'spPr', 'txPr', 'extLst',)

    def __init__(self,
                 legendPos="r",
                 legendEntry=(),
                 layout=None,
                 overlay=None,
                 spPr=None,
                 txPr=None,
                 extLst=None,
                 ):
        self.legendPos = legendPos
        self.legendEntry = legendEntry
        self.layout = layout
        self.overlay = overlay
        self.spPr = spPr
        self.txPr = txPr
        self.extLst = extLst
| 27.358974 | 95 | 0.617619 |
f79fb445df346c6f39cee6e1a1b5694aade732cb | 12,197 | py | Python | tcex/bin/spec_tool_readme_md.py | GShepherdTC/tcex | 70b1199b8bb9e63f53e2ba792489267108c909cd | [
"Apache-2.0"
] | null | null | null | tcex/bin/spec_tool_readme_md.py | GShepherdTC/tcex | 70b1199b8bb9e63f53e2ba792489267108c909cd | [
"Apache-2.0"
] | null | null | null | tcex/bin/spec_tool_readme_md.py | GShepherdTC/tcex | 70b1199b8bb9e63f53e2ba792489267108c909cd | [
"Apache-2.0"
] | null | null | null | """TcEx Generate Configurations CLI Command"""
# standard library
from typing import TYPE_CHECKING, List, Optional
# first-party
from tcex.app_config.permutation import Permutation
from tcex.bin.bin_abc import BinABC
if TYPE_CHECKING:
# first-party
from tcex.app_config import AppSpecYml
from tcex.app_config.install_json import ParamsModel
from tcex.app_config.models.app_spec_yml_model import SectionsModel
class SpecToolReadmeMd(BinABC):
    """Generate the App's README.md file content from the app spec."""

    def __init__(self, asy: 'AppSpecYml') -> None:
        """Initialize class properties."""
        super().__init__()
        self.asy = asy

        # properties
        self.i1 = ' ' * 2  # one indent level for markdown nesting
        self.filename = 'README.md'
        self.permutations = Permutation(self.log)

    @staticmethod
    def _add_actions_title(readme_md: List[str]) -> None:
        """Add title for action section."""
        readme_md.append('# Actions')
        readme_md.append('')
        readme_md.append('---')
        readme_md.append('')

    def _add_actions_sub_title(self, readme_md: List[str], action: str) -> None:
        """Add title for sub action section."""
        readme_md.append(f'## {action}')
        # Fetch the per-action note once and reuse it (previously fetched twice).
        npa = self.asy.model.get_note_per_action(action).note
        if npa is not None:
            readme_md.append(npa)
        readme_md.append('')

    def _add_labels(self, readme_md: List[str]) -> None:
        """Add labels data to readme.md."""
        if self.asy.model.labels:
            readme_md.append('# Labels')
            readme_md.append('')
            _labels = ', '.join(sorted(self.asy.model.labels))
            readme_md.append(f'- {_labels}')

    def _add_description(self, readme_md: List[str]) -> None:
        """Add top level description/note data to readme.md."""
        if self.asy.model.note:
            readme_md.append('# Description')
            readme_md.append('')
            readme_md.append(self.asy.model.note)
            readme_md.append('')
            if self.asy.model.note_per_action:
                readme_md.append('\n\n'.join(self.asy.model.note_per_action_formatted))
                readme_md.append('')

    @staticmethod
    def _add_inputs_title(readme_md: List[str], header: int) -> None:
        """Add title for input section at the given markdown header level."""
        header_value = '#' * header
        readme_md.append(f'{header_value} Inputs')
        readme_md.append('')

    @staticmethod
    def _add_service_config_title(readme_md: List[str], header: int) -> None:
        """Add title for service configuration section."""
        header_value = '#' * header
        readme_md.append(f'{header_value} Service Configuration')
        readme_md.append('')

    def _add_param(self, readme_md: List[str], param: 'ParamsModel') -> None:
        """Add params data to readme.md.

        **API Key** _(String)_
        _**Duration**_ _(String, Optional)_
        """
        label = f'**{param.label}**'
        type_data = f'{param.type}'
        if param.required is False:
            # change the format of the label name to italics if it is optional
            if param.type.lower() not in ['boolean']:
                label = f'_{label}_'
                type_data += ', Optional'

        if param.default is not None:
            # following current format where boolean values are shown as
            # selected/unselected and others true/false
            if param.type.lower() == 'boolean':
                default_value = 'Selected' if param.default is True else 'Unselected'
            else:
                default_value = param.default
            type_data += f''', Default: {str(default_value).replace('|', ', ')}'''

        readme_md.append(f'{self.i1}{label} _({type_data})_')
        readme_md.append('')

    def _add_params(
        self, readme_md: List[str], section: 'SectionsModel', action: Optional[str] = None
    ) -> None:
        """Add all visible params of a section (optionally filtered by action)."""
        # add params
        for param in section.params:
            if param.disabled is True or param.hidden is True:
                continue

            # don't add tc_action param since it's the top level action
            if param.name == 'tc_action':
                continue

            if action is not None:
                # validate that the input is valid for the current action
                if self._valid_param_for_action(param, action) is False:
                    continue

            # add param data
            self._add_param(readme_md, param)

            # add param note data
            self._add_param_note(readme_md, param)

            # add param playbook data types data
            self._add_param_pb_data_type(readme_md, param)

            # add param valid_values data
            self._add_param_valid_values(readme_md, param)

    def _add_param_note(self, readme_md: List[str], param: 'ParamsModel') -> None:
        """Add note data to readme.md."""
        if param.note:
            readme_md.append(f'{self.i1}{param.note}')
            readme_md.append('')

    def _add_param_pb_data_type(self, readme_md: List[str], param: 'ParamsModel') -> None:
        """Add playbook data types values data to readme.md."""
        # matching current format where single 'String' is not displayed
        if param.playbook_data_type and param.playbook_data_type != ['String']:
            _pdt = ', '.join(param.playbook_data_type)
            readme_md.append(f'{self.i1}> **Allows:** {_pdt}')
            readme_md.append('')

    def _add_param_valid_values(self, readme_md: List[str], param: 'ParamsModel') -> None:
        """Add valid values data to readme.md."""
        # matching current format where TEXT and KEYCHAIN were excluded.
        valid_values = [p for p in param.valid_values if not p.startswith('${')]
        if valid_values:
            _valid_values = ', '.join(valid_values)
            readme_md.append(f'{self.i1}> **Valid Values:** {_valid_values}')
            readme_md.append('')

    def _add_outputs(self, readme_md: List[str], action: Optional[str] = None) -> None:
        """Add output data to readme.md."""
        if self.asy.model.output_variables:
            readme_md.append('### Outputs')
            readme_md.append('')

            # NOTE(review): self.ij is presumably provided by BinABC -- confirm.
            outputs = self.ij.model.playbook.output_variables
            if action:
                outputs = self.permutations.outputs_by_action(action)

            for output in outputs:
                readme_md.append(f'{self.i1}- {output.name} *({output.type})*')
            readme_md.append('')

    def _has_section_params(self, section: 'SectionsModel', action: str) -> bool:
        """Return True if the provided section has params."""
        if [
            sp
            for sp in section.params
            if sp.disabled is False
            and sp.name != 'tc_action'
            and self._valid_param_for_action(sp, action) is True
        ]:
            return True
        return False

    def _valid_param_for_action(self, param: 'ParamsModel', action: str) -> bool:
        """Return True if param is valid for action."""
        return self.permutations.validate_input_variable(
            param.name,
            {'tc_action': action},
            self.permutations.extract_tc_action_clause(param.display),
        )

    @staticmethod
    def _add_section_title(readme_md: List[str], section: 'SectionsModel') -> None:
        """Add title for input section."""
        readme_md.append(f'### *{section.section_name}*')
        readme_md.append('')

    def _add_params_for_playbook_action_app(self, readme_md: List[str], actions: List[str]) -> None:
        """Add inputs for playbook action app."""
        # add title for actions section
        self._add_actions_title(readme_md)

        for action in actions:
            # add title for action sub section
            self._add_actions_sub_title(readme_md, action)

            # add inputs and sections
            self._add_inputs_title(readme_md, 3)
            for section in self.asy.model.sections:
                # don't show the section if it has no params
                if self._has_section_params(section, action) is False:
                    continue

                # add section title
                self._add_section_title(readme_md, section)

                # add params
                self._add_params(readme_md, section, action)

            # add output data
            self._add_outputs(readme_md, action)

            # add horizontal rule
            readme_md.append('---')

    def _add_params_for_playbook_std_app(self, readme_md: List[str]) -> None:
        """Add inputs for playbook standard app."""
        self._add_inputs_title(readme_md, 3)
        for section in self.asy.model.sections:
            # don't show the section if it has no params
            valid_section = False
            for sp in section.params:
                if sp.disabled is False and sp.hidden is False:
                    valid_section = True
            if valid_section is False:
                continue

            # add section title
            self._add_section_title(readme_md, section)

            self._add_params(readme_md, section)

        # add output data
        self._add_outputs(readme_md)

    def _add_params_for_non_playbook_apps(self, readme_md: List[str]) -> None:
        """Add inputs for non playbook app."""
        service_config = []
        non_service_config = []

        # Separate Params into service configuration params and other parameters
        for param in self.asy.model.params:
            if param.disabled is True or param.hidden is True:
                continue
            if param.service_config is True:
                service_config.append(param)
            else:
                non_service_config.append(param)

        # Add service configuration params to ReadMe file.
        if service_config:
            self._add_service_config_title(readme_md, 1)
            for param in service_config:
                # add param data
                self._add_param(readme_md, param)

                # add param note data
                self._add_param_note(readme_md, param)

                # add param valid_values data
                self._add_param_valid_values(readme_md, param)

        # add inputs and sections
        self._add_inputs_title(readme_md, 3)
        for param in non_service_config:
            # add param data
            self._add_param(readme_md, param)

            # add param note data
            self._add_param_note(readme_md, param)

            # add param valid_values data
            self._add_param_valid_values(readme_md, param)

        # add output data
        self._add_outputs(readme_md)

    def generate(self) -> List[str]:
        """Generate the README.md content as a list of markdown lines."""
        readme_md = []

        # add App Name
        readme_md.append(f'# {self.asy.model.display_name}')
        readme_md.append('')

        # add release notes
        readme_md.extend(self.asy.model.release_notes_formatted)

        # add category
        if self.asy.model.category:
            readme_md.append('# Category')
            readme_md.append('')
            readme_md.append(f'- {self.asy.model.category}')
            readme_md.append('')

        # add description
        self._add_description(readme_md)

        # add inputs
        if self.asy.model.runtime_level.lower() == 'playbook':
            actions = self.ij.model.get_param('tc_action').valid_values or []
            if actions:
                # add inputs for action based sections
                self._add_params_for_playbook_action_app(readme_md, actions)
            else:
                # add inputs for non action based sections
                self._add_params_for_playbook_std_app(readme_md)
        elif self.asy.model.runtime_level.lower() in [
            'triggerservice',
            'webhooktriggerservice',
            'organization',
        ]:
            self._add_params_for_non_playbook_apps(readme_md)

        # add labels
        self._add_labels(readme_md)

        # add end of file newline
        readme_md.append('')

        return readme_md
| 36.192878 | 100 | 0.596294 |
fc58a6de0e365aa0d3ca9c156207e8752576cf96 | 1,238 | py | Python | analysis/comparing_factorization.py | michaelneuder/python_vs_cpp | dd55e36bcfba85751bf92698cc16933c1b9c9559 | [
"MIT"
] | 1 | 2017-08-07T23:35:11.000Z | 2017-08-07T23:35:11.000Z | analysis/comparing_factorization.py | michaelneuder/python_vs_cpp | dd55e36bcfba85751bf92698cc16933c1b9c9559 | [
"MIT"
] | null | null | null | analysis/comparing_factorization.py | michaelneuder/python_vs_cpp | dd55e36bcfba85751bf92698cc16933c1b9c9559 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Load recorded prime-factorization runtimes and compare Python vs C++."""
    print('\ncomparing runtime of python and c++ with prime factorization\n')
    column_names = ['number of input', 'runtime']
    py_frame = pd.io.parsers.read_csv('../python/non-graphical/data/decomp_data.csv', names=column_names, header=1)
    cpp_frame = pd.io.parsers.read_csv('../cpp/non-graphical/data/decomp_data.csv', names=column_names, header=1)
    py_runtime = np.asarray(py_frame['runtime'], dtype=np.float64)
    cpp_runtime = np.asarray(cpp_frame['runtime'], dtype=np.float64)
    py_inputs = np.asarray(py_frame['number of input'], dtype=np.float64)
    cpp_inputs = np.asarray(cpp_frame['number of input'], dtype=np.float64)

    # plot both runtime curves on a shared axis
    plt.title("prime factorization")
    plt.xlabel("number of input")
    plt.ylabel("runtime (ms)")
    plt.plot(py_inputs, py_runtime, 'g', label='python')
    plt.plot(cpp_inputs, cpp_runtime, 'r', label='cpp')
    plt.legend(loc=2)
    plt.show()

    # element-wise slowdown factor of Python relative to C++
    slowdown = np.asarray([py_runtime[k] / cpp_runtime[k] for k in range(len(py_runtime))])
    print("python is on average {} times slower than cpp".format(slowdown.mean()))


if __name__ == '__main__':
    main()
| 39.935484 | 136 | 0.695477 |
7dcfafb6bc02db42a346622fc4131d08adaaecb9 | 1,331 | py | Python | 01_fyyur/starter_code/migrations/versions/de98cf310c55_.py | silasjimmy/Fyyur-Website | 9c396bc6103a298627ed176f04dff2ac4f3b48c8 | [
"MIT"
] | null | null | null | 01_fyyur/starter_code/migrations/versions/de98cf310c55_.py | silasjimmy/Fyyur-Website | 9c396bc6103a298627ed176f04dff2ac4f3b48c8 | [
"MIT"
] | null | null | null | 01_fyyur/starter_code/migrations/versions/de98cf310c55_.py | silasjimmy/Fyyur-Website | 9c396bc6103a298627ed176f04dff2ac4f3b48c8 | [
"MIT"
] | null | null | null | """empty message
Revision ID: de98cf310c55
Revises: ee2f941a9c76
Create Date: 2021-06-06 12:14:07.942211
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'de98cf310c55'  # this migration's unique id
down_revision = 'ee2f941a9c76'  # parent revision this migration upgrades from
branch_labels = None
depends_on = None
def upgrade():
    """Apply this migration: create the ``Artist`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('Artist',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('city', sa.String(length=120), nullable=True),
    sa.Column('state', sa.String(length=120), nullable=True),
    sa.Column('phone', sa.String(length=120), nullable=True),
    sa.Column('genres', sa.String(length=120), nullable=True),
    sa.Column('image_link', sa.String(length=500), nullable=True),
    sa.Column('facebook_link', sa.String(length=120), nullable=True),
    sa.Column('website_link', sa.String(length=120), nullable=True),
    sa.Column('looking_for_talent', sa.Boolean(), nullable=True),
    sa.Column('seeking_description', sa.String(length=200), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this migration: drop the ``Artist`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('Artist')
    # ### end Alembic commands ###
| 31.690476 | 75 | 0.687453 |
df48c30f7a6b6a22210d923bf8d6dd17789acd67 | 4,485 | py | Python | main.py | edwinmillan/TrelloAttachmentCleanup | 6c6246bcd485cc87c9a999c1954b733356f94038 | [
"MIT"
] | null | null | null | main.py | edwinmillan/TrelloAttachmentCleanup | 6c6246bcd485cc87c9a999c1954b733356f94038 | [
"MIT"
] | null | null | null | main.py | edwinmillan/TrelloAttachmentCleanup | 6c6246bcd485cc87c9a999c1954b733356f94038 | [
"MIT"
] | null | null | null | import requests
import re
import json
import configparser
from trello import TrelloApi, Cards
from typing import List, Optional, Iterable, NoReturn
class TrelloCards(Cards):
    """Cards endpoint client extended with attachment-update support."""

    def __init__(self, apikey, token=None):
        super(TrelloCards, self).__init__(apikey, token)

    def update_attachment(self, card_id_or_shortlink: str, attachment_id: str, data: dict):
        """PUT new attribute values onto one attachment of a card."""
        url = f"https://trello.com/1/cards/{card_id_or_shortlink}/attachments/{attachment_id}"
        auth = {"key": self._apikey, "token": self._token}
        response = requests.put(url, params=auth, data=data)
        return self.raise_or_json(response)
class Trello(TrelloApi):
    """TrelloApi client whose ``cards`` endpoint supports attachment updates."""

    def __init__(self, apikey, token=None):
        super(Trello, self).__init__(apikey, token)
        # Replace the stock Cards client with the extended TrelloCards.
        self.cards = TrelloCards(apikey, token)
def get_target_board(trello: 'Trello', board_name: str) -> Optional[dict]:
    """Return the caller's board whose name matches ``board_name``.

    Args:
        trello: API client exposing ``members.get_board('me')``.
        board_name: exact board name to look for.

    Returns:
        The first matching board dict, or None when no board matches.
        (Previously an empty tuple was returned on a miss, contradicting the
        declared Optional[dict]; both values are falsy, so truthiness checks
        in callers keep working.)
    """
    my_boards = trello.members.get_board('me')
    return next((board for board in my_boards if board.get('name') == board_name), None)
def filter_target_list(board_lists: List[dict], board_name: str) -> Optional[dict]:
    """Return the first list dict whose 'name' equals ``board_name``, else None."""
    matches = (candidate for candidate in board_lists if candidate.get('name') == board_name)
    return next(matches, None)
def get_list_info(trello: Trello, api_board_info: dict, target_list_name: str) -> Optional[dict]:
    """Fetch the board's lists and return the one named ``target_list_name``."""
    all_lists = trello.boards.get_list(api_board_info.get('id'))
    return filter_target_list(all_lists, target_list_name)
def remove_file_extension(file_name: str) -> str:
    """Strip a trailing ``.ext`` from ``file_name``; return it unchanged if none.

    Only the last extension is removed ('a.tar.gz' -> 'a.tar'); names with no
    dot-extension (including dotfiles like '.gitignore') pass through as-is.
    """
    stripped = re.search(r'(.+)\.\S+', file_name)
    return stripped.group(1) if stripped else file_name
def update_board_attachments(trello: Trello, board_name: str, target_list_names: Iterable) -> None:
    """Strip file extensions from attachment names on the given board's lists.

    For each named list on ``board_name``, walks every card's attachments and
    renames any attachment whose name carries an extension. Progress is
    reported via print; nothing is returned.
    (Annotation fixed: the function returns normally, so NoReturn was wrong.)
    """
    board_info = get_target_board(trello, board_name=board_name)
    if board_info:
        print(f"Working on Board: {board_name}")
        # Go through each list names and update each card's attachments
        for list_name in target_list_names:
            # Get the dict holding the list ID using the board.
            list_info = get_list_info(trello=trello, api_board_info=board_info, target_list_name=list_name)
            if list_info:
                print(f"Working on List: {list_info.get('name')}")
                list_id = list_info.get('id')
                # Get the list of cards
                list_cards = trello.lists.get_card(list_id)
                # Iterates over each card and gets the attachments.
                for card in list_cards:
                    print(f"\tLooking through card: {card.get('name')}")
                    card_id = card.get('id')
                    attachments = trello.cards.get_attachment(card_id)
                    for attachment in attachments:
                        attachment_id = attachment.get('id')
                        raw_name = attachment.get('name')
                        # If the name has an ext, return a version without the ext.
                        parsed_name = remove_file_extension(raw_name)
                        # If it's not already fixed, go update it via the API.
                        if raw_name and parsed_name != raw_name:
                            print(f"\t\tUpdating attachment: {raw_name} -> {parsed_name}")
                            payload = {'name': parsed_name}
                            trello.cards.update_attachment(card_id_or_shortlink=card_id,
                                                           attachment_id=attachment_id, data=payload)
    else:
        print('No Board info found')
def load_credentials(credential_json: str) -> (str, str):
    """Read the Trello API key and token from a JSON credentials file."""
    with open(credential_json, 'r') as cred_handle:
        parsed = json.loads(cred_handle.read())
    return parsed.get('key'), parsed.get('token')
def load_config_settings(config_filename: str) -> (str, Iterable):
    """Read the target board name and list names from an INI config file.

    Expects a ``[settings]`` section with ``board_name`` and a comma-separated
    ``list_names`` value.

    Returns:
        (board_name, list_names) where list_names is a list of stripped names.
        (Previously a one-shot ``map`` iterator was returned, which would be
        silently empty on a second iteration; a list is safe to reuse.)
    """
    config = configparser.ConfigParser()
    config.read(config_filename)
    settings = config['settings']
    target_board_name = settings['board_name']
    target_list_names = [name.strip() for name in settings['list_names'].split(',')]
    return target_board_name, target_list_names
def main() -> None:
    """Entry point: load credentials/config and run the attachment cleanup.

    (Annotation fixed: the function returns normally, so NoReturn was wrong.)
    """
    key, token = load_credentials('token.json')
    target_board_name, target_list_names = load_config_settings(config_filename='config.ini')
    trello = Trello(apikey=key, token=token)
    update_board_attachments(trello, target_board_name, target_list_names)


if __name__ == '__main__':
    main()
| 39.690265 | 109 | 0.64058 |
cb1333f60ee3f386a49d3fd49c6ed4f4f16f9f12 | 978 | py | Python | assignment.py | sylvaingchassang/experiment-design | d8f7f9630579835bf9ca35ea5d182327a6ddaaab | [
"MIT"
] | 2 | 2020-01-07T18:45:13.000Z | 2020-01-17T04:14:44.000Z | assignment.py | sylvaingchassang/experiment-design | d8f7f9630579835bf9ca35ea5d182327a6ddaaab | [
"MIT"
] | 2 | 2019-09-26T07:02:10.000Z | 2019-09-26T11:19:15.000Z | assignment.py | sylvaingchassang/experiment-design | d8f7f9630579835bf9ca35ea5d182327a6ddaaab | [
"MIT"
] | 1 | 2019-11-15T19:50:29.000Z | 2019-11-15T19:50:29.000Z | import numpy as np
from numbers import Number
from random import shuffle, seed
from functools import reduce
from operator import add
def clean_weights(weights):
    """Normalize ``weights`` into a list of assignment probabilities.

    A bare number is wrapped in a single-element list. When the weights sum
    to less than one, the remainder is prepended as the probability of the
    implicit first (control) arm.
    """
    if isinstance(weights, Number):
        weights = [weights]
    remainder = 1 - sum(weights)
    if remainder > 0:
        weights = [remainder] + weights
    return weights
def get_assignments_as_positions(assignment):
    """Group subject indices by treatment label.

    Args:
        assignment: sequence of non-negative integer labels, one per subject.

    Returns:
        A list whose i-th entry is the array of positions with label i, for
        every label 0..max(assignment). The previous ``range(np.max(...))``
        was off by one and silently dropped the highest label's group.
    """
    assignment = np.array(assignment)
    return [np.where(assignment == label)[0]
            for label in range(int(np.max(assignment)) + 1)]
def draw_iid_assignment(weights, sample_size):
    """Draw each subject's arm independently with the given probabilities."""
    probabilities = clean_weights(weights)
    arms = range(len(probabilities))
    return np.random.choice(arms, size=sample_size, replace=True, p=probabilities)
def draw_shuffled_assignment(weights, sample_size):
    """Assign arms in (near-)exact proportions, then shuffle the order.

    Each arm gets ceil(weight * sample_size) slots, so the total can slightly
    exceed ``sample_size``.
    """
    weights = clean_weights(weights)
    assignment = []
    for arm, weight in enumerate(weights):
        assignment.extend(int(np.ceil(weight * sample_size)) * [arm])
    shuffle(assignment)
    return assignment
| 27.942857 | 71 | 0.691207 |
ce16a6d757f26e56ae781c5a290a35a0956ff03d | 8,650 | py | Python | src/df_v1/scripts/train/train_setup.py | Kokoro-AI/heart-disease-prediction-tf2 | b0b465254744b8ff6192d2254bd0cb6d83217ac0 | [
"MIT"
] | 2 | 2020-02-12T01:05:14.000Z | 2020-07-11T13:29:48.000Z | src/df_v1/scripts/train/train_setup.py | Kokoro-AI/heart-disease-prediction-tf2 | b0b465254744b8ff6192d2254bd0cb6d83217ac0 | [
"MIT"
] | 3 | 2020-02-10T23:57:42.000Z | 2020-06-12T15:49:40.000Z | src/df_v1/scripts/train/train_setup.py | Kokoro-AI/heart-disease-prediction-tf2 | b0b465254744b8ff6192d2254bd0cb6d83217ac0 | [
"MIT"
] | null | null | null | """
Logic for model creation, training launching and actions needed to be
accomplished during training (metrics monitor, model saving etc.)
"""
import os
import time
import json
import numpy as np
import tensorflow as tf
from datetime import datetime
from tensorflow.keras import Input, Model
from src.datasets import load
from src.utils.callbacks import create_callbacks
from tensorflow.keras.layers import Dense, DenseFeatures, Dropout
from sklearn.model_selection import StratifiedKFold, train_test_split
def train(config):
    """Run repeated hold-out validation of the model and persist all artifacts.

    Args:
        config: dict-like of settings (keys such as 'model.save_path',
            'output.config_path', 'output.train_path', 'summary.save_path',
            'data.batch_size', 'data.cuda', 'data.gpu', 'train.lr',
            'train.epochs', 'train.patience', 'data.dataset').

    Side effects: writes the config snapshot, per-split CSV results and the
    global summary CSV; saves model checkpoints, diagrams and TensorBoard
    summaries; prints progress and timing.
    """
    np.random.seed(2020)
    tf.random.set_seed(2020)

    # Useful data
    now = datetime.now()
    now_as_str = now.strftime('%y_%m_%d-%H:%M:%S')

    # Output files
    checkpoint_path = config['model.save_path']
    config_path = config['output.config_path'].format(date=now_as_str)
    csv_output_path = config['output.train_path'].format(date=now_as_str)
    tensorboard_summary_dir = config['summary.save_path']
    summary_path = "results/summary.csv"

    # Output dirs
    data_dir = "data/"
    config_dir = config_path[:config_path.rfind('/')]
    output_dir = csv_output_path[:csv_output_path.rfind('/')]

    # Create folder for config
    if not os.path.exists(config_dir):
        os.makedirs(config_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # generate config file
    file = open(config_path, 'w')
    file.write(json.dumps(config, indent=2))
    file.close()

    file = open(csv_output_path, 'w')
    file.write("")
    file.close()

    # create summary file if not exists
    if not os.path.exists(summary_path):
        file = open(summary_path, 'w')
        file.write("datetime, model, config, acc_std, acc_mean\n")
        file.close()

    # Data loader
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    _, X, y = load(data_dir, config)

    # Defines datasets on the input data.
    # NOTE(review): batch_size here is unused; model.fit reads
    # config['data.batch_size'] directly below.
    batch_size = config['data.batch_size']

    # Determine device
    if config['data.cuda']:
        cuda_num = config['data.gpu']
        device_name = f'GPU:{cuda_num}'
    else:
        device_name = 'CPU:0'

    time_start = time.time()

    # define 10-fold cross validation test harness
    # NOTE(review): despite the comment, the loop below performs 10 random
    # 80/20 train/test splits (train_test_split); skf is never used.
    skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
    cvscores = []

    print ("Running model performance validation... please wait!")

    for split in range(10):
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=40 + split)

        # Compiles a model, prints the model summary, and saves the model diagram into a png file.
        model = create_model(learning_rate=config['train.lr'])
        model.summary()

        split_checkpoint_path = checkpoint_path.format(split=split)
        split_results_path = csv_output_path.format(split=split)
        split_checkpoint_dir = split_checkpoint_path[:split_checkpoint_path.rfind('/')]
        split_results_dir = split_results_path[:split_results_path.rfind('/')]

        # Create folder for model
        if not os.path.exists(split_checkpoint_dir):
            os.makedirs(split_checkpoint_dir)

        # Create output for train process
        if not os.path.exists(split_results_dir):
            os.makedirs(split_results_dir)

        tf.keras.utils.plot_model(model, os.path.join(split_results_dir, "keras_model.png"), show_shapes=True, show_layer_names=False)

        callbacks = create_callbacks(
            tensorboard_summary_dir.format(split=split),
            split_results_path,
            split_checkpoint_path,
            patience=config['train.patience']
        )

        # Fit the model
        # NOTE(review): the returned History object is unused.
        with tf.device(device_name):
            history = model.fit(
                dict(X_train),
                y_train,
                validation_split=0.1,
                epochs=config['train.epochs'],
                batch_size=config['data.batch_size'],
                use_multiprocessing=True,
                callbacks=callbacks
            )

        # evaluate the model
        scores = model.evaluate(dict(X_test), y_test, verbose=0)
        print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
        cvscores.append(scores[1] * 100)

        # Runs prediction on test data.
        predictions = tf.round(model.predict(dict(X_test))).numpy().flatten()
        print("Predictions on test data:")
        print(predictions)

        model_path = tf.train.latest_checkpoint(split_checkpoint_dir, latest_filename=split_checkpoint_path)
        if not model_path:
            print("Skipping evaluation. No checkpoint found in: {}".format(split_checkpoint_dir))
        else:
            model_from_saved = tf.keras.models.load_model(model_path)
            model_from_saved.summary()

            # Runs test data through the reloaded model to make sure the results are same.
            predictions_from_saved = tf.round(model_from_saved.predict(dict(X_test))).numpy().flatten()
            np.testing.assert_array_equal(predictions_from_saved, predictions)

    print ("Done.")
    print ("Summary report on mean and std.")

    # The average and standard deviation of the model performance
    print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))

    time_end = time.time()

    summary = "{}, {}, df, {}, {}, {}\n".format(now_as_str, config['data.dataset'], config_path, np.std(cvscores), np.mean(cvscores))
    print(summary)

    file = open(summary_path, 'a+')
    file.write(summary)
    file.close()

    elapsed = time_end - time_start
    h, min = elapsed//3600, elapsed%3600//60
    sec = elapsed-min*60
    print(f"Training took: {h:.2f}h {min:.2f}m {sec:.2f}s!")
def create_model(learning_rate=0.01):
    """
    Constructs the binary classifier over the transformed features and
    compiles it with Adam / binary cross-entropy / accuracy.
    """
    feature_columns, feature_layer_inputs = get_feature_transform()

    # Dense feature transform followed by two hidden layers with dropout.
    transformed = DenseFeatures(feature_columns, name="feature")(feature_layer_inputs)
    hidden = Dense(128, kernel_initializer="normal", activation="relu", name="hidden_layer_1")(transformed)
    hidden = Dropout(0.2, name="dropout_1")(hidden)
    hidden = Dense(128, kernel_initializer="normal", activation="relu", name="hidden_layer_2")(hidden)
    prediction = Dense(1, activation="sigmoid", name="target")(hidden)

    model = Model(inputs=list(feature_layer_inputs.values()), outputs=prediction)
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss="binary_crossentropy",
        metrics=["accuracy"],
    )
    return model
def get_feature_transform():
    """
    Builds the feature-column transformation and matching Keras inputs.

    Returns (feature_columns, feature_layer_inputs): the column list covers
    raw numerics, a bucketized age and one-hot categoricals; the dict maps
    each input feature name to a tf.keras.Input.
    """
    feature_columns = []
    feature_layer_inputs = {}

    # Numeric features pass through unchanged.
    for name in ['age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'ca']:
        feature_columns.append(tf.feature_column.numeric_column(name))
        feature_layer_inputs[name] = tf.keras.Input(shape=(1,), name=name)

    # Age is additionally bucketized into coarse ranges.
    feature_columns.append(
        tf.feature_column.bucketized_column(
            tf.feature_column.numeric_column("age"),
            boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]))

    # Categorical string features become one-hot (indicator) columns.
    vocabularies = [
        ('thal', ['fixed', 'normal', 'reversible']),
        ('sex', ['0', '1']),
        ('cp', ['0', '1', '2', '3']),
        ('slope', ['0', '1', '2']),
    ]
    for name, vocab in vocabularies:
        categorical = tf.feature_column.categorical_column_with_vocabulary_list(name, vocab)
        feature_columns.append(tf.feature_column.indicator_column(categorical))
        feature_layer_inputs[name] = tf.keras.Input(shape=(1,), name=name, dtype=tf.string)

    return feature_columns, feature_layer_inputs
return feature_columns, feature_layer_inputs | 36.965812 | 134 | 0.672023 |
51ef63246c19b1b846d1f2f432aaac23fb6a750a | 821 | py | Python | Python/1. Python Basics/mit-6.00.1-python solutions/lec11.4-coordinate.py | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | null | null | null | Python/1. Python Basics/mit-6.00.1-python solutions/lec11.4-coordinate.py | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | null | null | null | Python/1. Python Basics/mit-6.00.1-python solutions/lec11.4-coordinate.py | okara83/Becoming-a-Data-Scientist | f09a15f7f239b96b77a2f080c403b2f3e95c9650 | [
"MIT"
] | 2 | 2022-02-09T15:41:33.000Z | 2022-02-11T07:47:40.000Z | # lec11.4-coordinate.py
#
# Lecture 11 - Classes
# Video 4 - Adding Methods to a Class
#
# edX MITx 6.00.1x
# Introduction to Computer Science and Programming Using Python
import math
def sq(x):
    """Return the square of x."""
    return x ** 2
class Coordinate(object):
    """A point in the plane that knows how to print itself and measure
    its Euclidean distance to another Coordinate."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        # Printable form used by `print`, e.g. <3,4>
        return "<" + str(self.x) + "," + str(self.y) + ">"

    def distance(self, other):
        """Return the Euclidean distance between self and other."""
        dx = self.x - other.x
        dy = self.y - other.y
        return math.sqrt(sq(dx) + sq(dy))
# Demo: build two points and show the custom __str__ representation.
c = Coordinate(3, 4)
Origin = Coordinate(0, 0)
# Parenthesized print works in both Python 2 and Python 3 for a single
# argument; the original bare `print c` is a syntax error under Python 3.
print(c)
| 24.147059 | 74 | 0.637028 |
69fb74ef52339ad4846fcb4a8841d2503d0480a4 | 10,193 | py | Python | huaweicloud-sdk-gaussdb/huaweicloudsdkgaussdb/v3/model/mysql_slow_log_list.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-gaussdb/huaweicloudsdkgaussdb/v3/model/mysql_slow_log_list.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-gaussdb/huaweicloudsdkgaussdb/v3/model/mysql_slow_log_list.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class MysqlSlowLogList:
    """One slow-query log record returned by the service API.

    Auto-generated SDK model: every logical field is stored in a private
    ``_name`` attribute and exposed through a getter/setter property pair.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []
    openapi_types = {
        'node_id': 'str',
        'count': 'str',
        'time': 'str',
        'lock_time': 'str',
        'rows_sent': 'str',
        'rows_examined': 'str',
        'database': 'str',
        'users': 'str',
        'query_sample': 'str',
        'type': 'str',
        'start_time': 'str',
        'client_ip': 'str'
    }
    # Maps python attribute name -> JSON key in the wire format.
    attribute_map = {
        'node_id': 'node_id',
        'count': 'count',
        'time': 'time',
        'lock_time': 'lock_time',
        'rows_sent': 'rows_sent',
        'rows_examined': 'rows_examined',
        'database': 'database',
        'users': 'users',
        'query_sample': 'query_sample',
        'type': 'type',
        'start_time': 'start_time',
        'client_ip': 'client_ip'
    }
    def __init__(self, node_id=None, count=None, time=None, lock_time=None, rows_sent=None, rows_examined=None, database=None, users=None, query_sample=None, type=None, start_time=None, client_ip=None):
        """MysqlSlowLogList - a model defined in huaweicloud sdk"""
        self._node_id = None
        self._count = None
        self._time = None
        self._lock_time = None
        self._rows_sent = None
        self._rows_examined = None
        self._database = None
        self._users = None
        self._query_sample = None
        self._type = None
        self._start_time = None
        self._client_ip = None
        self.discriminator = None
        # Only assign fields that were explicitly provided; assignment goes
        # through the property setters below.
        if node_id is not None:
            self.node_id = node_id
        if count is not None:
            self.count = count
        if time is not None:
            self.time = time
        if lock_time is not None:
            self.lock_time = lock_time
        if rows_sent is not None:
            self.rows_sent = rows_sent
        if rows_examined is not None:
            self.rows_examined = rows_examined
        if database is not None:
            self.database = database
        if users is not None:
            self.users = users
        if query_sample is not None:
            self.query_sample = query_sample
        if type is not None:
            self.type = type
        if start_time is not None:
            self.start_time = start_time
        if client_ip is not None:
            self.client_ip = client_ip
    @property
    def node_id(self):
        """Gets the node_id of this MysqlSlowLogList.
        Node ID.
        :return: The node_id of this MysqlSlowLogList.
        :rtype: str
        """
        return self._node_id
    @node_id.setter
    def node_id(self, node_id):
        """Sets the node_id of this MysqlSlowLogList.
        Node ID.
        :param node_id: The node_id of this MysqlSlowLogList.
        :type: str
        """
        self._node_id = node_id
    @property
    def count(self):
        """Gets the count of this MysqlSlowLogList.
        Execution count.
        :return: The count of this MysqlSlowLogList.
        :rtype: str
        """
        return self._count
    @count.setter
    def count(self, count):
        """Sets the count of this MysqlSlowLogList.
        Execution count.
        :param count: The count of this MysqlSlowLogList.
        :type: str
        """
        self._count = count
    @property
    def time(self):
        """Gets the time of this MysqlSlowLogList.
        Execution time.
        :return: The time of this MysqlSlowLogList.
        :rtype: str
        """
        return self._time
    @time.setter
    def time(self, time):
        """Sets the time of this MysqlSlowLogList.
        Execution time.
        :param time: The time of this MysqlSlowLogList.
        :type: str
        """
        self._time = time
    @property
    def lock_time(self):
        """Gets the lock_time of this MysqlSlowLogList.
        Lock wait time.
        :return: The lock_time of this MysqlSlowLogList.
        :rtype: str
        """
        return self._lock_time
    @lock_time.setter
    def lock_time(self, lock_time):
        """Sets the lock_time of this MysqlSlowLogList.
        Lock wait time.
        :param lock_time: The lock_time of this MysqlSlowLogList.
        :type: str
        """
        self._lock_time = lock_time
    @property
    def rows_sent(self):
        """Gets the rows_sent of this MysqlSlowLogList.
        Number of rows in the result set.
        :return: The rows_sent of this MysqlSlowLogList.
        :rtype: str
        """
        return self._rows_sent
    @rows_sent.setter
    def rows_sent(self, rows_sent):
        """Sets the rows_sent of this MysqlSlowLogList.
        Number of rows in the result set.
        :param rows_sent: The rows_sent of this MysqlSlowLogList.
        :type: str
        """
        self._rows_sent = rows_sent
    @property
    def rows_examined(self):
        """Gets the rows_examined of this MysqlSlowLogList.
        Number of rows scanned.
        :return: The rows_examined of this MysqlSlowLogList.
        :rtype: str
        """
        return self._rows_examined
    @rows_examined.setter
    def rows_examined(self, rows_examined):
        """Sets the rows_examined of this MysqlSlowLogList.
        Number of rows scanned.
        :param rows_examined: The rows_examined of this MysqlSlowLogList.
        :type: str
        """
        self._rows_examined = rows_examined
    @property
    def database(self):
        """Gets the database of this MysqlSlowLogList.
        Database the statement ran against.
        :return: The database of this MysqlSlowLogList.
        :rtype: str
        """
        return self._database
    @database.setter
    def database(self, database):
        """Sets the database of this MysqlSlowLogList.
        Database the statement ran against.
        :param database: The database of this MysqlSlowLogList.
        :type: str
        """
        self._database = database
    @property
    def users(self):
        """Gets the users of this MysqlSlowLogList.
        Account.
        :return: The users of this MysqlSlowLogList.
        :rtype: str
        """
        return self._users
    @users.setter
    def users(self, users):
        """Sets the users of this MysqlSlowLogList.
        Account.
        :param users: The users of this MysqlSlowLogList.
        :type: str
        """
        self._users = users
    @property
    def query_sample(self):
        """Gets the query_sample of this MysqlSlowLogList.
        Executed SQL statement.
        :return: The query_sample of this MysqlSlowLogList.
        :rtype: str
        """
        return self._query_sample
    @query_sample.setter
    def query_sample(self, query_sample):
        """Sets the query_sample of this MysqlSlowLogList.
        Executed SQL statement.
        :param query_sample: The query_sample of this MysqlSlowLogList.
        :type: str
        """
        self._query_sample = query_sample
    @property
    def type(self):
        """Gets the type of this MysqlSlowLogList.
        Statement type.
        :return: The type of this MysqlSlowLogList.
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this MysqlSlowLogList.
        Statement type.
        :param type: The type of this MysqlSlowLogList.
        :type: str
        """
        self._type = type
    @property
    def start_time(self):
        """Gets the start_time of this MysqlSlowLogList.
        Occurrence time, in UTC.
        :return: The start_time of this MysqlSlowLogList.
        :rtype: str
        """
        return self._start_time
    @start_time.setter
    def start_time(self, start_time):
        """Sets the start_time of this MysqlSlowLogList.
        Occurrence time, in UTC.
        :param start_time: The start_time of this MysqlSlowLogList.
        :type: str
        """
        self._start_time = start_time
    @property
    def client_ip(self):
        """Gets the client_ip of this MysqlSlowLogList.
        IP address.
        :return: The client_ip of this MysqlSlowLogList.
        :rtype: str
        """
        return self._client_ip
    @client_ip.setter
    def client_ip(self, client_ip):
        """Sets the client_ip of this MysqlSlowLogList.
        IP address.
        :param client_ip: The client_ip of this MysqlSlowLogList.
        :type: str
        """
        self._client_ip = client_ip
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes declared sensitive; pass others through.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # Python 2 only: force UTF-8 default encoding so non-ASCII field
        # values serialize without UnicodeDecodeError.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, MysqlSlowLogList):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 24.740291 | 202 | 0.56323 |
0f544fa217dbb47fd46f309a91d0058cdc68b075 | 11,681 | py | Python | concourse/pipelines/gen_pipeline.py | liang0/gpdb | b786d63a3cb93eafd0464199c436adafc2e64501 | [
"PostgreSQL",
"Apache-2.0"
] | 1 | 2022-03-07T02:51:44.000Z | 2022-03-07T02:51:44.000Z | concourse/pipelines/gen_pipeline.py | liang0/gpdb | b786d63a3cb93eafd0464199c436adafc2e64501 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | concourse/pipelines/gen_pipeline.py | liang0/gpdb | b786d63a3cb93eafd0464199c436adafc2e64501 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ----------------------------------------------------------------------
"""Generate pipeline (default: gpdb_master-generated.yml) from template (default:
templates/gpdb-tpl.yml).
Python module requirements:
- jinja2 (install through pip or easy_install)
"""
import argparse
import datetime
import os
import re
import subprocess
import yaml
from jinja2 import Environment, FileSystemLoader
# Directory containing this generator script (and the templates/ subdir).
PIPELINES_DIR = os.path.dirname(os.path.abspath(__file__))
# Jinja2 environment configured with [[ ]] delimiters so that Concourse's
# native {{ }} interpolation in the templates is left untouched.
TEMPLATE_ENVIRONMENT = Environment(
    autoescape=False,
    loader=FileSystemLoader(os.path.join(PIPELINES_DIR, 'templates')),
    trim_blocks=True,
    lstrip_blocks=True,
    variable_start_string='[[', # 'default {{ has conflict with pipeline syntax'
    variable_end_string=']]',
    extensions=['jinja2.ext.loopcontrols'])
# Variables that govern pipeline validation
RELEASE_VALIDATOR_JOB = ['Release_Candidate']
# Synchronization/gate jobs; they carry no test work of their own.
JOBS_THAT_ARE_GATES = ['gate_icw_start',
                       'gate_icw_end',
                       'gate_replication_start',
                       'gate_resource_groups_start',
                       'gate_cli_start',
                       'gate_ud_start',
                       'gate_advanced_analytics_start',
                       'gate_release_candidate_start']
# Jobs deliberately excluded from the release-candidate dependency check in
# validate_pipeline_release_jobs().
JOBS_THAT_SHOULD_NOT_BLOCK_RELEASE = [
    'compile_gpdb_binary_swap_centos6',
    'icw_gporca_centos6_gpos_memory',
    'walrep_2',
    'client_loader_remote_test_aix',
    'compile_gpdb_sles11',
    'compile_gpdb_ubuntu16',
    'compile_gpdb_aix7_remote',
    'icw_gporca_sles11',
    'icw_gporca_sles12',
    'icw_planner_sles12',
    'icw_planner_ubuntu16',
    'icw_gporca_conan_ubuntu16',
    'gpdb_packaging_ubuntu16',
    'resource_group_sles12',
    'madlib_build_gppkg',
    'MADlib_Test_planner_centos6',
    'MADlib_Test_orca_centos6',
    'MADlib_Test_planner_centos7',
    'MADlib_Test_orca_centos7',
    'icw_extensions_gpcloud_ubuntu16'
] + RELEASE_VALIDATOR_JOB + JOBS_THAT_ARE_GATES
def suggested_git_remote():
    """Return an HTTPS URL for the current git remote.

    The canonical greenplum-db/gpdb remote is replaced with a placeholder,
    and SSH-style remotes (git@host:path) are converted to HTTPS form.
    """
    default_remote = "<https://github.com/<github-user>/gpdb>"
    remote = subprocess.check_output("git ls-remote --get-url", shell=True).rstrip()

    if "greenplum-db/gpdb" in remote:
        return default_remote

    if "git@" in remote:
        # git@host:path  ->  https://host/path
        hostname, path = remote.split('@')[1].split(':')
        return 'https://%s/%s' % (hostname, path)

    return remote
def suggested_git_branch():
    """Return the checked-out git branch, or a placeholder when the branch
    is one of the shared mainline branches."""
    branch = subprocess.check_output("git rev-parse --abbrev-ref HEAD", shell=True).rstrip()
    if branch in ("master", "5X_STABLE"):
        return "<branch-name>"
    return branch
def render_template(template_filename, context):
    """Render the named Jinja2 template with *context* and return the text."""
    template = TEMPLATE_ENVIRONMENT.get_template(template_filename)
    return template.render(context)
def validate_pipeline_release_jobs(raw_pipeline_yml):
    """Check that every pipeline job either gates the release candidate or is
    explicitly whitelisted in JOBS_THAT_SHOULD_NOT_BLOCK_RELEASE.

    :param raw_pipeline_yml: rendered pipeline YAML text
    :return: True when all jobs are accounted for, False otherwise
    """
    # Parenthesized print works in both Python 2 and 3.
    print("======================================================================")
    print("Validate Pipeline Release Jobs")
    print("----------------------------------------------------------------------")
    # Strip concourse v2.x {{ }} variable interpolation so the YAML parses.
    pipeline_yml_cleaned = re.sub('{{', '', re.sub('}}', '', raw_pipeline_yml))
    # safe_load: never construct arbitrary Python objects from the YAML.
    pipeline = yaml.safe_load(pipeline_yml_cleaned)
    jobs_raw = pipeline['jobs']
    all_job_names = [job['name'] for job in jobs_raw]
    # The gate job's first aggregate step lists every job that must pass
    # before a release candidate is cut.
    release_candidate_job = [job for job in jobs_raw if job['name'] == 'gate_release_candidate_start'][0]
    release_qualifying_job_names = release_candidate_job['plan'][0]['aggregate'][0]['passed']
    jobs_that_are_not_blocking_release = [job for job in all_job_names if job not in release_qualifying_job_names]
    unaccounted_for_jobs = [job for job in jobs_that_are_not_blocking_release if job not in JOBS_THAT_SHOULD_NOT_BLOCK_RELEASE]
    if unaccounted_for_jobs:
        print("Please add the following jobs as a Release_Candidate dependency or ignore them")
        print("by adding them to JOBS_THAT_SHOULD_NOT_BLOCK_RELEASE in " + __file__)
        print(unaccounted_for_jobs)
        return False
    print("Pipeline validated: all jobs accounted for")
    return True
def create_pipeline():
    """Render the pipeline template and write it to ARGS.output_filepath.

    For 'prod' pipelines the rendered YAML is validated first; on validation
    failure nothing is written and False is returned. Returns True on success.
    """
    # ARGS.test_trigger_false is True by default (store_false flag), meaning
    # triggers stay enabled ("true") unless -n was passed.
    test_trigger = "true" if ARGS.test_trigger_false else "false"

    context = {
        'template_filename': ARGS.template_filename,
        'generator_filename': os.path.basename(__file__),
        'timestamp': datetime.datetime.now(),
        'os_types': ARGS.os_types,
        'test_sections': ARGS.test_sections,
        'pipeline_type': ARGS.pipeline_type,
        'test_trigger': test_trigger
    }

    pipeline_yml = render_template(ARGS.template_filename, context)
    if ARGS.pipeline_type == 'prod':
        if not validate_pipeline_release_jobs(pipeline_yml):
            print("Refusing to update the pipeline file")
            return False

    with open(ARGS.output_filepath, 'w') as output:
        header = render_template('pipeline_header.yml', context)
        output.write(header)
        output.write(pipeline_yml)

    return True
def how_to_use_generated_pipeline_message():
    """Build the human-readable summary printed after generation, including
    the exact `fly set-pipeline` command(s) for the chosen pipeline type."""
    msg = '\n'
    msg += '======================================================================\n'
    msg += '  Generate Pipeline type: .. : %s\n' % ARGS.pipeline_type
    msg += '  Pipeline file ............ : %s\n' % ARGS.output_filepath
    msg += '  Template file ............ : %s\n' % ARGS.template_filename
    msg += '  OS Types ................. : %s\n' % ARGS.os_types
    msg += '  Test sections ............ : %s\n' % ARGS.test_sections
    msg += '  test_trigger ............. : %s\n' % ARGS.test_trigger_false
    msg += '======================================================================\n\n'
    if ARGS.pipeline_type == 'prod':
        # Production: two pipelines (with and without asserts).
        msg += 'NOTE: You can set the production pipelines with the following:\n\n'
        msg += 'fly -t gpdb-prod \\\n'
        msg += '    set-pipeline \\\n'
        msg += '    -p gpdb_master \\\n'
        msg += '    -c %s \\\n' % ARGS.output_filepath
        msg += '    -l ~/workspace/gp-continuous-integration/secrets/gpdb_common-ci-secrets.yml \\\n'
        msg += '    -l ~/workspace/gp-continuous-integration/secrets/gpdb_master-ci-secrets.yml \\\n'
        msg += '    -v pipeline-name=gpdb_master\n\n'
        msg += 'fly -t gpdb-prod \\\n'
        msg += '    set-pipeline \\\n'
        msg += '    -p gpdb_master_without_asserts \\\n'
        msg += '    -c %s \\\n' % ARGS.output_filepath
        msg += '    -l ~/workspace/gp-continuous-integration/secrets/gpdb_common-ci-secrets.yml \\\n'
        msg += '    -l ~/workspace/gp-continuous-integration/secrets/gpdb_master_without_asserts-ci-secrets.yml \\\n'  # pylint: disable=line-too-long
        msg += '    -v pipeline-name=gpdb_master_without_asserts\n'
    else:
        # Developer: single pipeline named after the output file.
        pipeline_name = os.path.basename(ARGS.output_filepath).rsplit('.', 1)[0]
        msg += 'NOTE: You can set the developer pipeline with the following:\n\n'
        msg += 'fly -t gpdb-dev \\\n'
        msg += '    set-pipeline \\\n'
        msg += '    -p %s \\\n' % pipeline_name
        msg += '    -c %s \\\n' % ARGS.output_filepath
        msg += '    -l ~/workspace/gp-continuous-integration/secrets/gpdb_common-ci-secrets.yml \\\n'
        msg += '    -l ~/workspace/gp-continuous-integration/secrets/gpdb_master-ci-secrets.dev.yml \\\n'
        msg += '    -l ~/workspace/gp-continuous-integration/secrets/ccp_ci_secrets_gpdb-dev.yml \\\n'
        msg += '    -v gpdb-git-remote=%s \\\n' % suggested_git_remote()
        msg += '    -v gpdb-git-branch=%s \\\n' % suggested_git_branch()
        msg += '    -v pipeline-name=%s \n' % pipeline_name
    return msg
if __name__ == "__main__":
    # Command-line interface for the pipeline generator.
    PARSER = argparse.ArgumentParser(
        description='Generate Concourse Pipeline utility',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    PARSER.add_argument('-T', '--template',
                        action='store',
                        dest='template_filename',
                        default="gpdb-tpl.yml",
                        help='Name of template to use, in templates/')
    default_output_filename = "gpdb_master-generated.yml"
    PARSER.add_argument('-o', '--output',
                        action='store',
                        dest='output_filepath',
                        default=os.path.join(PIPELINES_DIR, default_output_filename),
                        help='Output filepath')
    PARSER.add_argument('-O', '--os_types',
                        action='store',
                        dest='os_types',
                        default=['centos6'],
                        choices=['centos6', 'centos7', 'sles', 'aix7', 'win', 'ubuntu16'],
                        nargs='+',
                        help='List of OS values to support')
    PARSER.add_argument('-t', '--pipeline_type',
                        action='store',
                        dest='pipeline_type',
                        default='dev',
                        help='Pipeline type (production="prod")')
    PARSER.add_argument('-a', '--test_sections',
                        action='store',
                        dest='test_sections',
                        choices=['ICW', 'Replication', 'ResourceGroups', 'Interconnect', 'CLI', 'UD', 'AA', 'Extensions'],
                        default=['ICW'],
                        nargs='+',
                        help='Select tests sections to run')
    PARSER.add_argument('-n', '--test_trigger_false',
                        action='store_false',
                        default=True,
                        help='Set test triggers to "false". This only applies to dev pipelines.')
    PARSER.add_argument('-u', '--user',
                        action='store',
                        dest='user',
                        default=os.getlogin(),
                        help='Developer userid to use for pipeline file name.')
    ARGS = PARSER.parse_args()
    # Production pipelines always cover every OS and every test section.
    if ARGS.pipeline_type == 'prod':
        ARGS.os_types = ['centos6', 'centos7', 'sles', 'aix7', 'win', 'ubuntu16']
        ARGS.test_sections = ['ICW', 'Replication', 'ResourceGroups', 'Interconnect', 'CLI', 'UD', 'AA', 'Extensions']
    # if generating a dev pipeline but didn't specify an output, don't overwrite the master pipeline
    if ARGS.pipeline_type != 'prod' and os.path.basename(ARGS.output_filepath) == default_output_filename:
        default_dev_output_filename = 'gpdb-' + ARGS.pipeline_type + '-' + ARGS.user + '.yml'
        ARGS.output_filepath = os.path.join(PIPELINES_DIR, default_dev_output_filename)
    pipeline_created = create_pipeline()
    if pipeline_created:
        print how_to_use_generated_pipeline_message()
    else:
        exit(1)
51d2b4ce4674836786addf12e054db4eb5363a7c | 2,462 | py | Python | models/CVAE.py | PeterJaq/optical_film_toolbox | 0e2d2bfa5f1f93d405a2f25ee50e51771be777a5 | [
"Apache-2.0"
] | 4 | 2020-07-05T12:35:45.000Z | 2022-03-17T18:43:04.000Z | models/CVAE.py | PeterJaq/optical_film_toolbox | 0e2d2bfa5f1f93d405a2f25ee50e51771be777a5 | [
"Apache-2.0"
] | null | null | null | models/CVAE.py | PeterJaq/optical_film_toolbox | 0e2d2bfa5f1f93d405a2f25ee50e51771be777a5 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
class CVAE(tf.keras.Model):
"""Convolutional variational autoencoder."""
def __init__(self, latent_dim):
super(CVAE, self).__init__()
self.latent_dim = latent_dim
self.encoder = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(
filters=32, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Flatten(),
# No activation
tf.keras.layers.Dense(latent_dim + latent_dim),
]
)
self.decoder = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(latent_dim,)),
tf.keras.layers.Dense(units=7*7*32, activation=tf.nn.relu),
tf.keras.layers.Reshape(target_shape=(7, 7, 32)),
tf.keras.layers.Conv2DTranspose(
filters=64, kernel_size=3, strides=2, padding='same',
activation='relu'),
tf.keras.layers.Conv2DTranspose(
filters=32, kernel_size=3, strides=2, padding='same',
activation='relu'),
# No activation
tf.keras.layers.Conv2DTranspose(
filters=1, kernel_size=3, strides=1, padding='same'),
]
)
@tf.function
def sample(self, eps=None):
if eps is None:
eps = tf.random.normal(shape=(100, self.latent_dim))
return self.decode(eps, apply_sigmoid=True)
def encode(self, x):
mean, logvar = tf.split(self.encoder(x), num_or_size_splits=2, axis=1)
return mean, logvar
def reparameterize(self, mean, logvar):
eps = tf.random.normal(shape=mean.shape)
return eps * tf.exp(logvar * .5) + mean
def decode(self, z, apply_sigmoid=False):
logits = self.decoder(z)
if apply_sigmoid:
probs = tf.sigmoid(logits)
return probs
return logits
optimizer = tf.keras.optimizers.Adam(1e-4)
@tf.function
def train_step(model, x, optimizer):
    """Execute one training step: compute the loss for batch *x* and apply
    the gradients to *model* via *optimizer*.

    NOTE(review): despite the original comment, the loss is NOT returned.
    `compute_loss` is not defined in this file chunk; presumably it is the
    ELBO loss from the accompanying training script — confirm before reuse.
    """
    with tf.GradientTape() as tape:
        loss = compute_loss(model, x)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
e933abe906bbcbb59ad5ae882823135c3aec08d6 | 875 | py | Python | frigate/models.py | czipis/frigate | 0d3b99fafb9804b69913cf8a5490668a7077fedf | [
"MIT"
] | 4,083 | 2019-02-27T04:07:28.000Z | 2022-03-31T23:47:08.000Z | frigate/models.py | czipis/frigate | 0d3b99fafb9804b69913cf8a5490668a7077fedf | [
"MIT"
] | 1,817 | 2019-03-06T01:28:33.000Z | 2022-03-31T22:04:56.000Z | frigate/models.py | czipis/frigate | 0d3b99fafb9804b69913cf8a5490668a7077fedf | [
"MIT"
] | 538 | 2019-02-27T04:07:35.000Z | 2022-03-31T23:47:17.000Z | from numpy import unique
from peewee import *
from playhouse.sqlite_ext import *
class Event(Model):
id = CharField(null=False, primary_key=True, max_length=30)
label = CharField(index=True, max_length=20)
camera = CharField(index=True, max_length=20)
start_time = DateTimeField()
end_time = DateTimeField()
top_score = FloatField()
false_positive = BooleanField()
zones = JSONField()
thumbnail = TextField()
has_clip = BooleanField(default=True)
has_snapshot = BooleanField(default=True)
region = JSONField()
box = JSONField()
area = IntegerField()
class Recordings(Model):
id = CharField(null=False, primary_key=True, max_length=30)
camera = CharField(index=True, max_length=20)
path = CharField(unique=True)
start_time = DateTimeField()
end_time = DateTimeField()
duration = FloatField()
| 29.166667 | 63 | 0.705143 |
dad2a110f5c778ede1dee6032ea8f6d085f72b8b | 21,753 | py | Python | tf_pose/estimator.py | dengseng/odroid-xu4-pose-based-action-recognition | 7458023f3663d52f4a0b97a9ad0488c6e6eadd43 | [
"Apache-2.0"
] | 39 | 2019-06-12T06:56:21.000Z | 2022-03-29T11:07:59.000Z | tf_pose/estimator.py | dengseng/odroid-xu4-pose-based-action-recognition | 7458023f3663d52f4a0b97a9ad0488c6e6eadd43 | [
"Apache-2.0"
] | 9 | 2020-09-25T22:32:02.000Z | 2022-02-09T23:45:10.000Z | mysite/pose/estimator.py | jaykang-heo/poseAnalysis | 34cfac4a889e2c973651c1c07740ea0908542d68 | [
"MIT"
] | 25 | 2020-01-11T22:25:36.000Z | 2022-01-23T14:43:51.000Z | import logging
import math
import slidingwindow as sw
import cv2
import numpy as np
import tensorflow as tf
import time
from tf_pose import common
from tf_pose.common import CocoPart
from tf_pose.tensblur.smoother import Smoother
try:
from tf_pose.pafprocess import pafprocess
except ModuleNotFoundError as e:
print(e)
print('you need to build c++ library for pafprocess. See : https://github.com/ildoonet/tf-pose-estimation/tree/master/tf_pose/pafprocess')
exit(-1)
# Module logger: one stream handler with a timestamped format at INFO level.
logger = logging.getLogger('TfPoseEstimator')
# Drop any handlers attached earlier (e.g. by repeated imports) so log
# messages are not duplicated.
logger.handlers.clear()
ch = logging.StreamHandler()
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
# The original set the level twice (before and after adding the handler);
# once is sufficient.
logger.setLevel(logging.INFO)
def _round(v):
return int(round(v))
def _include_part(part_list, part_idx):
for part in part_list:
if part_idx == part.part_idx:
return True, part
return False, None
class Human:
    """
    One detected person: a set of connected BodyPart keypoints.

    body_parts: dict mapping part index -> BodyPart
    pairs:      the limb pairs this human was assembled from
    uidx_list:  unique "partidx-idx" ids of all keypoints seen so far
    score:      overall confidence for this human
    """
    __slots__ = ('body_parts', 'pairs', 'uidx_list', 'score')
    def __init__(self, pairs):
        self.pairs = []
        self.uidx_list = set()
        self.body_parts = {}
        for pair in pairs:
            self.add_pair(pair)
        self.score = 0.0
    @staticmethod
    def _get_uidx(part_idx, idx):
        # Unique keypoint id, e.g. "3-7".
        return '%d-%d' % (part_idx, idx)
    def add_pair(self, pair):
        """Register a limb pair, creating BodyPart entries for both ends."""
        self.pairs.append(pair)
        self.body_parts[pair.part_idx1] = BodyPart(Human._get_uidx(pair.part_idx1, pair.idx1),
                                                   pair.part_idx1,
                                                   pair.coord1[0], pair.coord1[1], pair.score)
        self.body_parts[pair.part_idx2] = BodyPart(Human._get_uidx(pair.part_idx2, pair.idx2),
                                                   pair.part_idx2,
                                                   pair.coord2[0], pair.coord2[1], pair.score)
        self.uidx_list.add(Human._get_uidx(pair.part_idx1, pair.idx1))
        self.uidx_list.add(Human._get_uidx(pair.part_idx2, pair.idx2))
    def is_connected(self, other):
        # Two humans are connected when they share at least one keypoint id.
        return len(self.uidx_list & other.uidx_list) > 0
    def merge(self, other):
        """Absorb all pairs of *other* into this human."""
        for pair in other.pairs:
            self.add_pair(pair)
    def part_count(self):
        return len(self.body_parts.keys())
    def get_max_score(self):
        return max([x.score for _, x in self.body_parts.items()])
    def get_face_box(self, img_w, img_h, mode=0):
        """
        Get Face box compared to img size (w, h)
        :param img_w: image width in pixels
        :param img_h: image height in pixels
        :param mode: 0 -> center-format box; 1 -> corner-format box (stricter:
                     requires at least one eye)
        :return: dict with keys x, y, w, h, or None when no usable face parts
        """
        # SEE : https://github.com/ildoonet/tf-pose-estimation/blob/master/tf_pose/common.py#L13
        _NOSE = CocoPart.Nose.value
        _NECK = CocoPart.Neck.value
        _REye = CocoPart.REye.value
        _LEye = CocoPart.LEye.value
        _REar = CocoPart.REar.value
        _LEar = CocoPart.LEar.value
        _THRESHOLD_PART_CONFIDENCE = 0.2
        parts = [part for idx, part in self.body_parts.items() if part.score > _THRESHOLD_PART_CONFIDENCE]
        # The nose anchors the face box; no nose -> no face.
        is_nose, part_nose = _include_part(parts, _NOSE)
        if not is_nose:
            return None
        # Estimate the box size from whichever face/neck parts are visible,
        # keeping the largest estimate.
        size = 0
        is_neck, part_neck = _include_part(parts, _NECK)
        if is_neck:
            size = max(size, img_h * (part_neck.y - part_nose.y) * 0.8)
        is_reye, part_reye = _include_part(parts, _REye)
        is_leye, part_leye = _include_part(parts, _LEye)
        if is_reye and is_leye:
            size = max(size, img_w * (part_reye.x - part_leye.x) * 2.0)
            size = max(size,
                       img_w * math.sqrt((part_reye.x - part_leye.x) ** 2 + (part_reye.y - part_leye.y) ** 2) * 2.0)
        if mode == 1:
            if not is_reye and not is_leye:
                return None
        is_rear, part_rear = _include_part(parts, _REar)
        is_lear, part_lear = _include_part(parts, _LEar)
        if is_rear and is_lear:
            size = max(size, img_w * (part_rear.x - part_lear.x) * 1.6)
        if size <= 0:
            return None
        # Horizontal placement depends on which eyes are visible (profile
        # views shift the box sideways).
        if not is_reye and is_leye:
            x = part_nose.x * img_w - (size // 3 * 2)
        elif is_reye and not is_leye:
            x = part_nose.x * img_w - (size // 3)
        else: # is_reye and is_leye:
            x = part_nose.x * img_w - size // 2
        x2 = x + size
        if mode == 0:
            y = part_nose.y * img_h - size // 3
        else:
            y = part_nose.y * img_h - _round(size / 2 * 1.2)
        y2 = y + size
        # fit into the image frame
        x = max(0, x)
        y = max(0, y)
        x2 = min(img_w - x, x2 - x) + x
        y2 = min(img_h - y, y2 - y) + y
        if _round(x2 - x) == 0.0 or _round(y2 - y) == 0.0:
            return None
        if mode == 0:
            # Center format: (x, y) is the box center.
            return {"x": _round((x + x2) / 2),
                    "y": _round((y + y2) / 2),
                    "w": _round(x2 - x),
                    "h": _round(y2 - y)}
        else:
            # Corner format: (x, y) is the top-left corner.
            return {"x": _round(x),
                    "y": _round(y),
                    "w": _round(x2 - x),
                    "h": _round(y2 - y)}
    def get_upper_body_box(self, img_w, img_h):
        """
        Get Upper body box compared to img size (w, h)
        :param img_w: image width in pixels
        :param img_h: image height in pixels
        :return: dict with keys x, y, w, h (center format), or None when too
                 few upper-body parts were detected
        """
        if not (img_w > 0 and img_h > 0):
            raise Exception("img size should be positive")
        _NOSE = CocoPart.Nose.value
        _NECK = CocoPart.Neck.value
        _RSHOULDER = CocoPart.RShoulder.value
        _LSHOULDER = CocoPart.LShoulder.value
        _THRESHOLD_PART_CONFIDENCE = 0.3
        parts = [part for idx, part in self.body_parts.items() if part.score > _THRESHOLD_PART_CONFIDENCE]
        # Only head/shoulder/hip keypoint indices contribute to the box.
        part_coords = [(img_w * part.x, img_h * part.y) for part in parts if
                       part.part_idx in [0, 1, 2, 5, 8, 11, 14, 15, 16, 17]]
        if len(part_coords) < 5:
            return None
        # Initial Bounding Box
        x = min([part[0] for part in part_coords])
        y = min([part[1] for part in part_coords])
        x2 = max([part[0] for part in part_coords])
        y2 = max([part[1] for part in part_coords])
        # # ------ Adjust heuristically +
        # if face points are detcted, adjust y value
        is_nose, part_nose = _include_part(parts, _NOSE)
        is_neck, part_neck = _include_part(parts, _NECK)
        torso_height = 0
        if is_nose and is_neck:
            y -= (part_neck.y * img_h - y) * 0.8
            torso_height = max(0, (part_neck.y - part_nose.y) * img_h * 2.5)
        #
        # # by using shoulder position, adjust width
        is_rshoulder, part_rshoulder = _include_part(parts, _RSHOULDER)
        is_lshoulder, part_lshoulder = _include_part(parts, _LSHOULDER)
        if is_rshoulder and is_lshoulder:
            half_w = x2 - x
            dx = half_w * 0.15
            x -= dx
            x2 += dx
        elif is_neck:
            # Only one shoulder visible: mirror its offset around the neck.
            if is_lshoulder and not is_rshoulder:
                half_w = abs(part_lshoulder.x - part_neck.x) * img_w * 1.15
                x = min(part_neck.x * img_w - half_w, x)
                x2 = max(part_neck.x * img_w + half_w, x2)
            elif not is_lshoulder and is_rshoulder:
                half_w = abs(part_rshoulder.x - part_neck.x) * img_w * 1.15
                x = min(part_neck.x * img_w - half_w, x)
                x2 = max(part_neck.x * img_w + half_w, x2)
        # ------ Adjust heuristically -
        # fit into the image frame
        x = max(0, x)
        y = max(0, y)
        x2 = min(img_w - x, x2 - x) + x
        y2 = min(img_h - y, y2 - y) + y
        if _round(x2 - x) == 0.0 or _round(y2 - y) == 0.0:
            return None
        return {"x": _round((x + x2) / 2),
                "y": _round((y + y2) / 2),
                "w": _round(x2 - x),
                "h": _round(y2 - y)}
    def __str__(self):
        return ' '.join([str(x) for x in self.body_parts.values()])
    def __repr__(self):
        return self.__str__()
class BodyPart:
    """A single detected keypoint.

    uidx     : unique id string, "humanid-partidx"
    part_idx : part index (eg. 0 for nose)
    x, y     : coordinate of the body part
    score    : confidence score
    """
    __slots__ = ('uidx', 'part_idx', 'x', 'y', 'score')

    def __init__(self, uidx, part_idx, x, y, score):
        self.uidx = uidx
        self.part_idx = part_idx
        self.x = x
        self.y = y
        self.score = score

    def get_part_name(self):
        # Map the numeric index back to its CocoPart enum member.
        return CocoPart(self.part_idx)

    def __str__(self):
        return 'BodyPart:%d-(%.2f, %.2f) score=%.2f' % (self.part_idx, self.x, self.y, self.score)

    def __repr__(self):
        return str(self)
class PoseEstimator:
    """Assembles Human objects from the C++ pafprocess part-affinity results."""
    def __init__(self):
        pass
    @staticmethod
    def estimate_paf(peaks, heat_mat, paf_mat):
        """Run PAF post-processing and return a list of Human objects.

        :param peaks: peak map of candidate keypoints
        :param heat_mat: part heat maps; shape[0]/shape[1] are used to
                         normalize keypoint coordinates to [0, 1]
        :param paf_mat: part-affinity-field maps
        """
        pafprocess.process_paf(peaks, heat_mat, paf_mat)
        humans = []
        for human_id in range(pafprocess.get_num_humans()):
            human = Human([])
            is_added = False
            # 18 COCO body parts per human; negative cid means "not found".
            for part_idx in range(18):
                c_idx = int(pafprocess.get_part_cid(human_id, part_idx))
                if c_idx < 0:
                    continue
                is_added = True
                human.body_parts[part_idx] = BodyPart(
                    '%d-%d' % (human_id, part_idx), part_idx,
                    float(pafprocess.get_part_x(c_idx)) / heat_mat.shape[1],
                    float(pafprocess.get_part_y(c_idx)) / heat_mat.shape[0],
                    pafprocess.get_part_score(c_idx)
                )
            # Keep only humans with at least one detected part.
            if is_added:
                score = pafprocess.get_score(human_id)
                human.score = score
                humans.append(human)
        return humans
class TfPoseEstimator:
    """Pose estimator driving a frozen Openpose TensorFlow (1.x) graph.

    Builds peak/heatmap/PAF tensors on top of the imported graph, keeps one
    persistent session, and post-processes with PoseEstimator.estimate_paf.
    """
    # TODO : multi-scale

    def __init__(self, graph_path, target_size=(320, 240), tf_config=None):
        """Load the frozen graph at *graph_path* and warm up the session.

        :param graph_path: path to a frozen .pb Openpose graph
        :param target_size: network input size as (width, height)
        :param tf_config: optional tf session config (e.g. GPU options)
        """
        self.target_size = target_size

        # load graph
        logger.info('loading graph from %s(default size=%dx%d)' % (graph_path, target_size[0], target_size[1]))
        with tf.gfile.GFile(graph_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

        self.graph = tf.get_default_graph()
        tf.import_graph_def(graph_def, name='TfPoseEstimator')
        self.persistent_sess = tf.Session(graph=self.graph, config=tf_config)

        # for op in self.graph.get_operations():
        #     print(op.name)
        # for ts in [n.name for n in tf.get_default_graph().as_graph_def().node]:
        #     print(ts)

        self.tensor_image = self.graph.get_tensor_by_name('TfPoseEstimator/image:0')
        self.tensor_output = self.graph.get_tensor_by_name('TfPoseEstimator/Openpose/concat_stage7:0')
        # Channels [0, 19) of the network output are part heatmaps; the
        # remaining channels are the part-affinity fields (PAFs).
        self.tensor_heatMat = self.tensor_output[:, :, :, :19]
        self.tensor_pafMat = self.tensor_output[:, :, :, 19:]
        # Runtime-selectable (height, width) of the upsampled maps.
        self.upsample_size = tf.placeholder(dtype=tf.int32, shape=(2,), name='upsample_size')
        self.tensor_heatMat_up = tf.image.resize_area(self.tensor_output[:, :, :, :19], self.upsample_size,
                                                      align_corners=False, name='upsample_heatmat')
        self.tensor_pafMat_up = tf.image.resize_area(self.tensor_output[:, :, :, 19:], self.upsample_size,
                                                     align_corners=False, name='upsample_pafmat')
        # Gaussian smoothing of the heatmaps before peak picking.
        smoother = Smoother({'data': self.tensor_heatMat_up}, 25, 3.0)
        gaussian_heatMat = smoother.get_output()

        # A pixel is kept as a peak iff it equals the max of its 3x3
        # neighbourhood; everything else is zeroed out.
        max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat, window_shape=(3, 3), pooling_type='MAX', padding='SAME')
        self.tensor_peaks = tf.where(tf.equal(gaussian_heatMat, max_pooled_in_tensor), gaussian_heatMat,
                                     tf.zeros_like(gaussian_heatMat))

        self.heatMat = self.pafMat = None

        # warm-up: initialize any uninitialized variables, then run inference
        # once per common upsample scale so later calls are fast.
        self.persistent_sess.run(tf.variables_initializer(
            [v for v in tf.global_variables() if
             v.name.split(':')[0] in [x.decode('utf-8') for x in
                                      self.persistent_sess.run(tf.report_uninitialized_variables())]
             ])
        )
        self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
            feed_dict={
                self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
                self.upsample_size: [target_size[1], target_size[0]]
            }
        )
        self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
            feed_dict={
                self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
                self.upsample_size: [target_size[1] // 2, target_size[0] // 2]
            }
        )
        self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up],
            feed_dict={
                self.tensor_image: [np.ndarray(shape=(target_size[1], target_size[0], 3), dtype=np.float32)],
                self.upsample_size: [target_size[1] // 4, target_size[0] // 4]
            }
        )

        # logs
        if self.tensor_image.dtype == tf.quint8:
            logger.info('quantization mode enabled.')

    def __del__(self):
        # self.persistent_sess.close()
        pass

    def get_flops(self):
        """Return the total float-op count of the loaded graph."""
        flops = tf.profiler.profile(self.graph, options=tf.profiler.ProfileOptionBuilder.float_operation())
        return flops.total_float_ops

    @staticmethod
    def _quantize_img(npimg):
        """Map a float image (presumably in [-1, 1] -- TODO confirm) to uint8."""
        npimg_q = npimg + 1.0
        npimg_q /= (2.0 / 2 ** 8)
        # npimg_q += 0.5
        npimg_q = npimg_q.astype(np.uint8)
        return npimg_q

    @staticmethod
    def draw_humans(npimg, humans, imgcopy=False):
        """Draw detected keypoints and skeleton lines onto *npimg* in place.

        :param imgcopy: if True, draw on a copy and leave the input untouched
        :return: the annotated image
        """
        if imgcopy:
            npimg = np.copy(npimg)
        image_h, image_w = npimg.shape[:2]
        centers = {}
        for human in humans:
            # draw point
            for i in range(common.CocoPart.Background.value):
                if i not in human.body_parts.keys():
                    continue

                body_part = human.body_parts[i]
                # De-normalize to pixel coordinates (+0.5 for rounding).
                center = (int(body_part.x * image_w + 0.5), int(body_part.y * image_h + 0.5))
                centers[i] = center
                cv2.circle(npimg, center, 3, common.CocoColors[i], thickness=3, lineType=8, shift=0)

            # draw line
            for pair_order, pair in enumerate(common.CocoPairsRender):
                if pair[0] not in human.body_parts.keys() or pair[1] not in human.body_parts.keys():
                    continue

                # npimg = cv2.line(npimg, centers[pair[0]], centers[pair[1]], common.CocoColors[pair_order], 3)
                cv2.line(npimg, centers[pair[0]], centers[pair[1]], common.CocoColors[pair_order], 3)

        return npimg

    def _get_scaled_img(self, npimg, scale):
        """Resize/crop *npimg* per *scale*; return ([images], [roi ratios]).

        *scale* may be:
          None              -- plain resize to the network input size
          float             -- rescale then cover with sliding windows
          (scale, step)     -- rescale then sliding windows with given step
          (x, y, ratio)     -- rescale then crop a single ROI around (x, y)
        Any other value returns None (unchanged historical behavior).
        Each ratio tuple is (x, y, w, h) relative to the rescaled image.
        """
        get_base_scale = lambda s, w, h: max(self.target_size[0] / float(h), self.target_size[1] / float(w)) * s
        img_h, img_w = npimg.shape[:2]

        if scale is None:
            if npimg.shape[:2] != (self.target_size[1], self.target_size[0]):
                # resize
                npimg = cv2.resize(npimg, self.target_size, interpolation=cv2.INTER_CUBIC)
            return [npimg], [(0.0, 0.0, 1.0, 1.0)]
        elif isinstance(scale, float):
            # scaling with center crop
            base_scale = get_base_scale(scale, img_w, img_h)
            npimg = cv2.resize(npimg, dsize=None, fx=base_scale, fy=base_scale, interpolation=cv2.INTER_CUBIC)

            o_size_h, o_size_w = npimg.shape[:2]
            # Pad with black if the rescaled image is smaller than the
            # network input in either dimension.
            if npimg.shape[0] < self.target_size[1] or npimg.shape[1] < self.target_size[0]:
                newimg = np.zeros(
                    (max(self.target_size[1], npimg.shape[0]), max(self.target_size[0], npimg.shape[1]), 3),
                    dtype=np.uint8)
                newimg[:npimg.shape[0], :npimg.shape[1], :] = npimg
                npimg = newimg

            windows = sw.generate(npimg, sw.DimOrder.HeightWidthChannel, self.target_size[0], self.target_size[1], 0.2)

            rois = []
            ratios = []
            for window in windows:
                indices = window.indices()
                roi = npimg[indices]
                rois.append(roi)
                ratio_x, ratio_y = float(indices[1].start) / o_size_w, float(indices[0].start) / o_size_h
                ratio_w, ratio_h = float(indices[1].stop - indices[1].start) / o_size_w, float(
                    indices[0].stop - indices[0].start) / o_size_h
                ratios.append((ratio_x, ratio_y, ratio_w, ratio_h))

            return rois, ratios
        elif isinstance(scale, tuple) and len(scale) == 2:
            # scaling with sliding window : (scale, step)
            base_scale = get_base_scale(scale[0], img_w, img_h)
            npimg = cv2.resize(npimg, dsize=None, fx=base_scale, fy=base_scale, interpolation=cv2.INTER_CUBIC)
            o_size_h, o_size_w = npimg.shape[:2]
            if npimg.shape[0] < self.target_size[1] or npimg.shape[1] < self.target_size[0]:
                newimg = np.zeros(
                    (max(self.target_size[1], npimg.shape[0]), max(self.target_size[0], npimg.shape[1]), 3),
                    dtype=np.uint8)
                newimg[:npimg.shape[0], :npimg.shape[1], :] = npimg
                npimg = newimg

            window_step = scale[1]

            windows = sw.generate(npimg, sw.DimOrder.HeightWidthChannel, self.target_size[0], self.target_size[1],
                                  window_step)

            rois = []
            ratios = []
            for window in windows:
                indices = window.indices()
                roi = npimg[indices]
                rois.append(roi)
                ratio_x, ratio_y = float(indices[1].start) / o_size_w, float(indices[0].start) / o_size_h
                ratio_w, ratio_h = float(indices[1].stop - indices[1].start) / o_size_w, float(
                    indices[0].stop - indices[0].start) / o_size_h
                ratios.append((ratio_x, ratio_y, ratio_w, ratio_h))

            return rois, ratios
        elif isinstance(scale, tuple) and len(scale) == 3:
            # scaling with ROI : (want_x, want_y, scale_ratio)
            base_scale = get_base_scale(scale[2], img_w, img_h)
            npimg = cv2.resize(npimg, dsize=None, fx=base_scale, fy=base_scale, interpolation=cv2.INTER_CUBIC)
            ratio_w = self.target_size[0] / float(npimg.shape[1])
            ratio_h = self.target_size[1] / float(npimg.shape[0])

            # Center the crop on (want_x, want_y), clamped into [0, 1].
            want_x, want_y = scale[:2]
            ratio_x = want_x - ratio_w / 2.
            ratio_y = want_y - ratio_h / 2.
            ratio_x = max(ratio_x, 0.0)
            ratio_y = max(ratio_y, 0.0)
            if ratio_x + ratio_w > 1.0:
                ratio_x = 1. - ratio_w
            if ratio_y + ratio_h > 1.0:
                ratio_y = 1. - ratio_h

            roi = self._crop_roi(npimg, ratio_x, ratio_y)
            return [roi], [(ratio_x, ratio_y, ratio_w, ratio_h)]

    def _crop_roi(self, npimg, ratio_x, ratio_y):
        """Crop a target-sized window at the given relative offset.

        If the crop falls partially outside the image, the result is pasted
        into a black target-sized canvas (centered) and that is returned.
        """
        target_w, target_h = self.target_size
        h, w = npimg.shape[:2]
        x = max(int(w * ratio_x - .5), 0)
        y = max(int(h * ratio_y - .5), 0)
        cropped = npimg[y:y + target_h, x:x + target_w]

        cropped_h, cropped_w = cropped.shape[:2]
        if cropped_w < target_w or cropped_h < target_h:
            npblank = np.zeros((self.target_size[1], self.target_size[0], 3), dtype=np.uint8)

            copy_x, copy_y = (target_w - cropped_w) // 2, (target_h - cropped_h) // 2
            npblank[copy_y:copy_y + cropped_h, copy_x:copy_x + cropped_w] = cropped
            # BUG FIX: the padded canvas was built but never returned, so
            # this branch previously fell through and returned None.
            return npblank
        else:
            return cropped

    def inference(self, npimg, resize_to_default=True, upsample_size=1.0):
        """Run the network on *npimg* and return the detected humans.

        :param resize_to_default: resize the image to the network input size
        :param upsample_size: scale factor applied to the 1/8-resolution maps
        :raises Exception: if *npimg* is None
        """
        if npimg is None:
            raise Exception('The image is not valid. Please check your image exists.')

        # The raw network output is at 1/8 of the input resolution.
        if resize_to_default:
            upsample_size = [int(self.target_size[1] / 8 * upsample_size), int(self.target_size[0] / 8 * upsample_size)]
        else:
            upsample_size = [int(npimg.shape[0] / 8 * upsample_size), int(npimg.shape[1] / 8 * upsample_size)]

        if self.tensor_image.dtype == tf.quint8:
            # quantize input image
            npimg = TfPoseEstimator._quantize_img(npimg)
            pass

        logger.debug('inference+ original shape=%dx%d' % (npimg.shape[1], npimg.shape[0]))
        img = npimg
        if resize_to_default:
            img = self._get_scaled_img(npimg, None)[0][0]
        peaks, heatMat_up, pafMat_up = self.persistent_sess.run(
            [self.tensor_peaks, self.tensor_heatMat_up, self.tensor_pafMat_up], feed_dict={
                self.tensor_image: [img], self.upsample_size: upsample_size
            })
        peaks = peaks[0]
        self.heatMat = heatMat_up[0]
        self.pafMat = pafMat_up[0]
        logger.debug('inference- heatMat=%dx%d pafMat=%dx%d' % (
            self.heatMat.shape[1], self.heatMat.shape[0], self.pafMat.shape[1], self.pafMat.shape[0]))

        t = time.time()
        humans = PoseEstimator.estimate_paf(peaks, self.heatMat, self.pafMat)
        logger.debug('estimate time=%.5f' % (time.time() - t))
        return humans
if __name__ == '__main__':
    # Smoke test: run PAF post-processing on pre-computed heatmap/PAF tensors.
    import pickle

    # Context manager ensures the file is closed even if unpickling fails
    # (the original opened/closed it manually).
    with open('./etcs/heatpaf1.pkl', 'rb') as f:
        data = pickle.load(f)
    logger.info('size={}'.format(data['heatMat'].shape))

    t = time.time()
    humans = PoseEstimator.estimate_paf(data['peaks'], data['heatMat'], data['pafMat'])
    dt = time.time() - t
    logger.info('elapsed #humans=%d time=%.8f' % (len(humans), dt))
| 38.500885 | 142 | 0.565163 |
6d722b2e1dddefd9994430418bef0270389a906f | 675 | py | Python | lsql/judge/templatetags/random_tags.py | Dashito14/lsql | 803abb14290aabfc2f33129f01aca87c6caac247 | [
"MIT"
] | null | null | null | lsql/judge/templatetags/random_tags.py | Dashito14/lsql | 803abb14290aabfc2f33129f01aca87c6caac247 | [
"MIT"
] | null | null | null | lsql/judge/templatetags/random_tags.py | Dashito14/lsql | 803abb14290aabfc2f33129f01aca87c6caac247 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright Enrique Martín <emartinm@ucm.es> 2021
Custom tags for generating random values to be used in templates.
"""
import random
import string
from django import template
register = template.Library()
# Symbols that can appear in a random ID
__ALPHABET = string.ascii_lowercase + string.ascii_uppercase + string.digits
@register.simple_tag
def random_id(size):
    """Generate and return a random alphanumeric ID of length ``size``.

    Non-positive sizes are clamped to 1. Characters are drawn uniformly
    from ASCII letters and digits (``__ALPHABET``).
    """
    size = max(1, size)  # non-positive sizes are considered as 1
    # join() over a generator avoids the quadratic cost of repeated
    # string concatenation while keeping the same character distribution.
    return ''.join(random.choice(__ALPHABET) for _ in range(size))
| 24.107143 | 96 | 0.711111 |
583e7b641751e9dc7d60a5d138e7e2ff4ca3fd85 | 3,302 | py | Python | milvus/client/utils.py | ireneontheway5/pymilvus | b812449a98602b4370b3b3430bdeb18b24035e53 | [
"Apache-2.0"
] | null | null | null | milvus/client/utils.py | ireneontheway5/pymilvus | b812449a98602b4370b3b3430bdeb18b24035e53 | [
"Apache-2.0"
] | null | null | null | milvus/client/utils.py | ireneontheway5/pymilvus | b812449a98602b4370b3b3430bdeb18b24035e53 | [
"Apache-2.0"
] | null | null | null | from urllib.parse import urlparse
from ..settings import DefaultConfig as config
from .grpc_client.grpc_gen import milvus_pb2
from .grpc_client.grpc_gen.milvus_pb2 import QueryResult as Grpc_Result
from ..client.grpc_client.grpc_results import QueryResult
from ..client.exceptions import ParamError
def set_uri(uri):
    """Parse *uri* (or the default GRPC URI when falsy) into "host:port"."""
    try:
        parsed = urlparse(uri) if uri else urlparse(config.GRPC_URI)
        host = parsed.hostname
        port = parsed.port
    except (AttributeError, ValueError, TypeError) as e:
        raise ParamError("uri is illegal: {}".format(e))
    return "%s:%s" % (host, port)
def merge_results(results_list, topk, *args, **kwargs):
    """Merge per-partition top-k query results into one result set.

    :param results_list: list of Grpc_Result / QueryResult, one per partition
    :param topk: requested top-k (kept for interface compatibility; the
        effective per-row batch length is derived from each result)
    :param kwargs:
        reverse -- sort distances descending (metrics where larger is better)
        raw     -- return the raw Grpc_Result instead of a QueryResult wrapper
    :raises ParamError: if an element of results_list has an unknown type
    :raises ValueError: if the partial results are inconsistent row-wise
    """

    def _reduce(source_ids, ids, source_diss, diss, k, reverse):
        """Merge two k-length (ids, distances) pairs, keeping the best k."""
        # Fast paths: one list entirely dominates the other.
        if source_diss[k - 1] <= diss[0]:
            return source_ids, source_diss
        if diss[k - 1] <= source_diss[0]:
            return ids, diss

        # Concatenate, sort by distance, and take the best k entries.
        source_diss.extend(diss)
        indexed_diss = enumerate(source_diss)
        best = sorted(indexed_diss, key=lambda x: x[1], reverse=reverse)[:k]
        merged_diss = [dist for _, dist in best]

        source_ids.extend(ids)
        merged_ids = [source_ids[i] for i, _ in best]

        return merged_ids, merged_diss

    status = milvus_pb2.Status(error_code=0,
                               reason="Success")

    reverse = kwargs.get('reverse', False)
    raw = kwargs.get('raw', False)

    if not results_list:
        return status, [], []

    merge_id_results = []
    merge_dis_results = []

    row_num = 0
    for files_collection in results_list:
        if not isinstance(files_collection, (Grpc_Result, QueryResult)):
            # BUG FIX: the ParamError was previously *returned*, not raised,
            # so callers silently received an exception instance.
            raise ParamError("Result type is unknown.")

        row_num = files_collection.row_num
        if not row_num:
            continue

        ids = files_collection.ids
        diss = files_collection.distances  # distance collections
        # Notice: batch_len is equal to topk, may need to compare with topk
        batch_len = len(ids) // row_num

        for row_index in range(row_num):
            id_batch = ids[row_index * batch_len: (row_index + 1) * batch_len]
            dis_batch = diss[row_index * batch_len: (row_index + 1) * batch_len]

            if len(merge_id_results) < row_index:
                raise ValueError("merge error")
            if len(merge_id_results) == row_index:
                # First partition contributing this row: take it verbatim.
                merge_id_results.append(id_batch)
                merge_dis_results.append(dis_batch)
            else:
                merge_id_results[row_index], merge_dis_results[row_index] = \
                    _reduce(merge_id_results[row_index], id_batch,
                            merge_dis_results[row_index], dis_batch,
                            batch_len,
                            reverse)

    # Flatten the per-row lists back into a single ids/distances sequence.
    id_merge_list = []
    dis_merge_list = []
    for id_results, dis_results in zip(merge_id_results, merge_dis_results):
        id_merge_list.extend(id_results)
        dis_merge_list.extend(dis_results)

    raw_result = Grpc_Result(
        status=status,
        row_num=row_num,
        ids=id_merge_list,
        distances=dis_merge_list
    )

    return raw_result if raw else QueryResult(raw_result)
8baa91a46d98c9b9e7cf858c0442475ec0fc002f | 4,141 | py | Python | utils/sampling.py | qq519043202/pde-surrogate | d59ca48a2bd7e1bcb375e11b56def36e163db948 | [
"MIT"
] | 62 | 2019-05-26T12:58:17.000Z | 2022-03-19T07:07:19.000Z | utils/sampling.py | qq519043202/pde-surrogate | d59ca48a2bd7e1bcb375e11b56def36e163db948 | [
"MIT"
] | 1 | 2020-08-26T00:45:27.000Z | 2020-08-26T01:11:18.000Z | utils/sampling.py | qq519043202/pde-surrogate | d59ca48a2bd7e1bcb375e11b56def36e163db948 | [
"MIT"
] | 34 | 2019-05-28T09:10:39.000Z | 2022-03-04T03:04:38.000Z | """
Sampling in spatial domain
collocation points
boundary points
For sure, lots of people will work on how to use different sampling grid
to train fully-connected networks.
"""
import numpy as np
import torch
from .lhs import lhs
class SampleSpatial2d(object):
    """Sampler of 2-D spatial points on a uniform grid.

    Coordinates use (y, x) ordering: h is the height / y axis, w the
    width / x axis. Integer grid coordinates in
    [0, ngrid_h-1] x [0, ngrid_w-1] are normalized to [0, 1].
    """

    def __init__(self, ngrid_h, ngrid_w):
        self.ngrid_h = int(ngrid_h)
        self.ngrid_w = int(ngrid_w)
        self.n_grids = self.ngrid_h * self.ngrid_w
        # (1, 2) scaling tensor used to normalize (y, x) grid indices.
        self.refactor = torch.FloatTensor(np.array([[ngrid_h-1, ngrid_w-1]]))
        self.coordinates = self._coordinates()
        self.coordinates_no_boundary = self._coordinates_no_boundary()

    def _coordinates(self):
        """All (y, x) integer grid coordinates, shape (n_grids, 2)."""
        # super wired torch.meshgrid
        grid_x, grid_y = np.meshgrid(np.arange(self.ngrid_w), np.arange(self.ngrid_h))
        points = np.stack((grid_y.flatten(), grid_x.flatten()), 1)
        return torch.FloatTensor(points)

    def _coordinates_no_boundary(self):
        """Interior (y, x) grid coordinates (boundary rows/cols excluded)."""
        grid_x, grid_y = np.meshgrid(np.arange(self.ngrid_w), np.arange(self.ngrid_h))
        points = np.stack((grid_y[1:-1, 1:-1].flatten(), grid_x[1:-1, 1:-1].flatten()), 1)
        return torch.FloatTensor(points)

    def _sample2d(self, on_grid, n_samples=None, no_boundary=False):
        """Sample n_samples points in [0, 1]^2, on-grid or Latin hypercube."""
        if n_samples is None:
            n_samples = self.n_grids
        if on_grid:
            if no_boundary:
                points = self.coordinates_no_boundary.to(torch.float32) / self.refactor
            else:
                points = self.coordinates.to(torch.float32) / self.refactor
            if n_samples < len(points):
                # BUG FIX: the permutation must range over len(points), not
                # self.n_grids -- with no_boundary=True there are fewer
                # points than grid cells, and the old indices could go out
                # of range.
                points = points[torch.randperm(len(points))[:n_samples]]
            elif n_samples > len(points):
                # BUG FIX: previously this warning also fired when
                # n_samples exactly equalled the number of points.
                print('n_samples is greater than grid size, set n_samples '\
                      'equals to grid size')
        else:
            points = torch.FloatTensor(lhs(2, n_samples))
        return points

    def _sample1d(self, horizontal, on_grid, n_samples=None):
        """Sample n_samples scalars in [0, 1] along one axis.

        if on_grid is on, n_samples is ignored if it is larger than ngrid.
        """
        ngrid = self.ngrid_h if horizontal else self.ngrid_w
        if n_samples is None:
            n_samples = ngrid
        if on_grid:
            points = (torch.arange(float(ngrid)) / (ngrid-1))
            if n_samples <= len(points):
                points = points[torch.randperm(ngrid)[:n_samples]]
            else:
                print('n_samples is greater than grid size, set n_samples '\
                      'equals to grid size')
        else:
            points = torch.rand(n_samples)
        return points

    def left(self, on_grid=True, n_samples=None):
        """Points on the left boundary (x = 0), shape (n, 2)."""
        points = self._sample1d(horizontal=True, on_grid=on_grid, n_samples=n_samples)
        return torch.stack((points, torch.zeros_like(points)), 1)

    def right(self, on_grid=True, n_samples=None):
        """Points on the right boundary (x = 1), shape (n, 2)."""
        points = self._sample1d(horizontal=True, on_grid=on_grid, n_samples=n_samples)
        return torch.stack((points, torch.ones_like(points)), 1)

    def top(self, on_grid=True, n_samples=None):
        """Points on the top boundary (y = 0), shape (n, 2)."""
        points = self._sample1d(horizontal=False, on_grid=on_grid, n_samples=n_samples)
        return torch.stack((torch.zeros_like(points), points), 1)

    def bottom(self, on_grid=True, n_samples=None):
        """Points on the bottom boundary (y = 1), shape (n, 2)."""
        points = self._sample1d(horizontal=False, on_grid=on_grid, n_samples=n_samples)
        return torch.stack((torch.ones_like(points), points), 1)

    def colloc(self, on_grid=True, n_samples=None, no_boundary=False):
        """Collocation points in the interior/whole domain, shape (n, 2)."""
        return self._sample2d(on_grid, n_samples, no_boundary)
if __name__ == '__main__':
    # Quick manual check: sample the right boundary of a 10x10 grid and
    # print the de-normalized coordinates plus the tensor shape.
    grid_height, grid_width = 10, 10
    sampler = SampleSpatial2d(grid_height, grid_width)
    points = sampler.right(on_grid=True, n_samples=12)
    print(points * sampler.refactor)
    print(points.shape)
c09340d940c1b2099ebf876e08da5d17f39d70de | 1,710 | py | Python | creational/abstract_factory.py | usera2tt/design-patterns-written-in-python | 04f642ef543b1887bd7247eb5ef307a0357c9a88 | [
"MIT"
] | null | null | null | creational/abstract_factory.py | usera2tt/design-patterns-written-in-python | 04f642ef543b1887bd7247eb5ef307a0357c9a88 | [
"MIT"
] | null | null | null | creational/abstract_factory.py | usera2tt/design-patterns-written-in-python | 04f642ef543b1887bd7247eb5ef307a0357c9a88 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
class Element(ABC):
    """Abstract product: a piece of UI that can render text content."""

    def __init__(self, style: str, tag: str = ''):
        self.style = style
        self.tag = tag

    @abstractmethod
    def render(self, content: str) -> str:
        raise NotImplementedError


class RawElement(Element):
    """Concrete product that emits its content without any markup."""

    def render(self, content: str):
        return f'{content}'


class TailwindElement(Element):
    """Concrete product wrapped in an HTML tag carrying a Tailwind class."""

    def render(self, content: str):
        opening = f'<{self.tag} class="{self.style}">'
        closing = f'</{self.tag}>'
        return opening + content + closing


class UI(ABC):
    """Abstract factory producing a family of styled elements."""

    STYLE = 'Normal'

    @abstractmethod
    def create_header(self) -> Element:
        raise NotImplementedError

    @abstractmethod
    def create_div(self) -> Element:
        raise NotImplementedError


class RawUI(UI):
    """Factory for plain, markup-free elements."""

    def create_header(self) -> Element:
        return RawElement(self.STYLE)

    def create_div(self) -> Element:
        return RawElement(self.STYLE)


class TailwindUI(UI):
    """Factory for Tailwind-styled elements."""

    STYLE = 'TailwindUI'

    def create_header(self) -> Element:
        return TailwindElement(self.STYLE, 'h1')

    def create_div(self) -> Element:
        return TailwindElement(self.STYLE, 'div')


class UIRenderer:
    """Client: renders a header and a div via whichever factory it is given."""

    def __init__(self, ui: UI):
        self.ui = ui

    def render(self):
        for element, text in ((self.ui.create_header(), 'headerrrr'),
                              (self.ui.create_div(), 'divvvv')):
            print(element.render(text))
def main():
    # Render the same page once with each available element factory.
    for factory in (RawUI(), TailwindUI()):
        UIRenderer(factory).render()


if __name__ == '__main__':
    main()
| 21.111111 | 89 | 0.635673 |
3278035ebedafd63a9ce813cb496d1e959871544 | 259 | py | Python | server/api/items.py | samgjones/Find-It-Android | 4d26ee328beb52a91783b6bc3eb5d1bc4f696ad7 | [
"Apache-2.0"
] | 3 | 2016-12-30T23:29:53.000Z | 2016-12-31T03:21:07.000Z | server/api/items.py | samgjones/Find-It-Android | 4d26ee328beb52a91783b6bc3eb5d1bc4f696ad7 | [
"Apache-2.0"
] | 7 | 2021-02-10T02:25:09.000Z | 2022-03-02T14:54:34.000Z | server/api/items.py | samgjones/Find-It-Android | 4d26ee328beb52a91783b6bc3eb5d1bc4f696ad7 | [
"Apache-2.0"
] | 1 | 2020-12-14T07:19:08.000Z | 2020-12-14T07:19:08.000Z | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class ApiItem(scrapy.Item):
    """Scraped-item container for the api project.

    Declare fields as class attributes, for example::

        name = scrapy.Field()
    """
| 19.923077 | 53 | 0.710425 |
dea3268cdda9b324386c463cf2ce71f5a7495e06 | 3,093 | py | Python | example/settings.py | underscorenygren/slick | c0c38c7b02f41f482b01f145b0348ecbb82952a9 | [
"MIT"
] | 1 | 2021-01-27T18:24:55.000Z | 2021-01-27T18:24:55.000Z | example/settings.py | underscorenygren/slick | c0c38c7b02f41f482b01f145b0348ecbb82952a9 | [
"MIT"
] | null | null | null | example/settings.py | underscorenygren/slick | c0c38c7b02f41f482b01f145b0348ecbb82952a9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Scrapy settings for example project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# Name Scrapy uses for this project (appears in logs and the default UA).
BOT_NAME = 'example'
# Module(s) where Scrapy discovers spiders; NEWSPIDER_MODULE is where
# `scrapy genspider` writes newly generated ones.
SPIDER_MODULES = ['example.spiders']
NEWSPIDER_MODULE = 'example.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'example (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'example.middlewares.ExampleSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'example.middlewares.ExampleDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'example.pipelines.ExamplePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 33.989011 | 103 | 0.774006 |
6d611fbd1b9102517802467cd78e0286f7e02197 | 2,877 | py | Python | demos/Dockers/demo_complete/ml_processor/ml_multiple_params.py | CogNet-5GPPP/CogNet_services | a08674b52dc238ecbe494ac36fc36614300e8632 | [
"Apache-2.0"
] | 1 | 2021-01-10T18:14:48.000Z | 2021-01-10T18:14:48.000Z | demos/Dockers/demo_complete/ml_processor/ml_multiple_params.py | CogNet-5GPPP/CogNet_services | a08674b52dc238ecbe494ac36fc36614300e8632 | [
"Apache-2.0"
] | null | null | null | demos/Dockers/demo_complete/ml_processor/ml_multiple_params.py | CogNet-5GPPP/CogNet_services | a08674b52dc238ecbe494ac36fc36614300e8632 | [
"Apache-2.0"
] | 4 | 2018-03-08T12:29:38.000Z | 2021-01-26T08:35:37.000Z | from kafka import KafkaConsumer
from kafka import KafkaProducer
from kafka.errors import KafkaError
import json
import random
import string
import time
# Read the policy definition shipped next to this script (policy.json).
with open('policy.json') as json_data:
    json_policy = json.load(json_data)

# Topic to publish ML output on, and the policy condition being evaluated.
topicName = json_policy['supa-policy']['supa-policy-target']['topicName']
conditionName = json_policy['supa-policy']['supa-policy-statement']['condition']['condition-name']
print(topicName)
print(conditionName)

# Consume the latest messages from the 'metrics' topic. The group id is
# derived from topic+condition so each policy gets its own offset tracking.
groupId = "%s%s" % (topicName, conditionName)
consumer = KafkaConsumer('metrics', bootstrap_servers=["kafka:9092"], group_id=groupId)

# Producer used both to re-publish the policy and to emit ML results.
producer = KafkaProducer(bootstrap_servers=["kafka:9092"])

# Push the (unmodified) policy to Kafka so other components pick it up.
future = producer.send('newpolicy', json.dumps(json_policy))

# Receive messages from the Kafka metrics topic and react to each one.
events = json_policy['supa-policy']['supa-policy-statement']['event']
for message in consumer:
    try:
        data = json.loads(message.value)
        # Only react to the CPU-usage metric this demo cares about.
        value_name = data['metric']['name']
        if value_name == "test.user_perc":
            value_data = data['metric']['value']
            # NOTE(review): the range stops at len(events)-1, so the last
            # event is never updated -- confirm whether that is intentional.
            for i in range(0, len(events) - 1):
                # Placeholder "machine learning": fill each event-value with
                # a random value matching its declared type.
                eventtype = json_policy['supa-policy']['supa-policy-statement']['event'][i]['event-value-type']
                if eventtype == "float":
                    value_data = random.uniform(8.0, 100.0)
                elif eventtype == "int":
                    # NOTE(review): some_machine_learning_operation is not
                    # defined anywhere in this script -- this branch raises
                    # NameError if an int-typed event is ever processed.
                    value_data = some_machine_learning_operation(json_policy['supa-policy']['supa-policy-statement']['event'][i]['value-data'])
                elif eventtype == "char":
                    arrayString = ["192.168.10.1", "192.168.10.2", "192.168.10.3", "192.168.10.4", "192.168.10.5", "192.168.10.6", "192.168.10.7"]
                    value_data = random.choice(arrayString)
                json_policy['supa-policy']['supa-policy-statement']['event'][i]['event-value'] = value_data
            # Send the updated policy as a new measure on the target topic.
            future = producer.send(topicName, json.dumps(json_policy))
            time.sleep(1)
    except ValueError:
        # BUG FIX: this was a Python-2 print statement (`print "..."`),
        # which is a SyntaxError under Python 3.
        print("No valid data")
| 36.417722 | 145 | 0.616962 |
b4dc8b69d71531194f66022349f3885cadb17190 | 2,713 | py | Python | connect_to_sql.py | Himan10/MusicTerminal | 5c48b4db50033baa5c13277364cbc2fa29fd8dbf | [
"MIT"
] | 1 | 2021-07-26T22:33:23.000Z | 2021-07-26T22:33:23.000Z | connect_to_sql.py | Himan10/MusicTerminal | 5c48b4db50033baa5c13277364cbc2fa29fd8dbf | [
"MIT"
] | null | null | null | connect_to_sql.py | Himan10/MusicTerminal | 5c48b4db50033baa5c13277364cbc2fa29fd8dbf | [
"MIT"
] | 1 | 2022-01-04T10:55:41.000Z | 2022-01-04T10:55:41.000Z | import os
import sys
import sqlite3
class SongDatabase:
    """Tiny SQLite-backed store for a playlist of song names.

    Usable as a context manager; the connection is closed on exit.
    """

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.conn.close()
        # Historical behavior: only TypeError is suppressed on exit;
        # every other exception propagates.
        return isinstance(value, TypeError)

    def __init__(self, dbname: str):
        """Open (or create) ``<dbname>.db`` in the current working directory."""
        self.dbname = dbname+'.db' if not dbname.endswith('.db') else dbname
        self.tablename = 'song_info'
        self.path = os.path.join(os.getcwd(), self.dbname)
        try:
            self.conn = sqlite3.connect(self.path)
        except Exception:
            # BUG FIX: previously did ``return err.args(0)`` -- args is a
            # tuple (not callable), and __init__ must not return a value.
            # Re-raise the original connection error instead.
            raise

    def isTableExist(self, cursor):
        """Return True if the song_info table exists in a valid SQLite DB."""
        # if file exists
        if os.path.isfile(self.path):
            # BUG FIX: previously measured sys.getsizeof(self.path) -- the
            # in-memory size of the *path string* -- instead of the size of
            # the database file on disk.
            if os.path.getsize(self.path) > 100:
                # data is stored in bytes
                with open(self.path, 'rb') as file:
                    dbheader = file.read(100)
                # A valid SQLite 3 database starts with this 16-byte magic.
                if dbheader[0:16] == b'SQLite format 3\x00':
                    # Check if table exists or not
                    cursor.execute("""
                        SELECT count(*)
                        FROM sqlite_master
                        WHERE type='table'
                        AND name='song_info'
                        """)
                    return cursor.fetchone()[0] == 1
        return False

    def feed_data(self, data: list):
        """Insert song rows; *data* is a sequence of 1-tuples [(name,), ...].

        :raises Exception: if *data* is empty.
        """
        # BUG FIX: emptiness was detected via ``data.__sizeof__() == 40``,
        # the in-memory size of an empty list, which is platform-dependent.
        if not data:
            raise Exception(' Playlist Empty ')
        # Create a cursor
        cursor = self.conn.cursor()
        # Create the table if not exists in DB (better than IF statement)
        cursor.execute(" CREATE TABLE IF NOT EXISTS {0} ( songs text UNIQUE ) ".format(self.tablename))
        # IGNORE INTO -> ignores the error message when inserting the data
        # that already has been existed (especially in the unique key column)
        cursor.executemany(""" INSERT OR IGNORE INTO {0} (songs) VALUES (?) """.format(self.tablename), data)
        self.conn.commit()

    def retrieve_data(self):
        """Return all stored songs as a list of 1-tuples.

        Deletes the database file and raises if the table does not exist.
        """
        cursor = self.conn.cursor()
        if not self.isTableExist(cursor):
            # delete that file if nothing is in it.
            # NOTE(review): removes the *relative* dbname, not self.path --
            # these differ if the working directory changed after __init__.
            os.remove(self.dbname)
            raise Exception("Playlist Table doesn't Exist")
        # Get the songs column
        cursor.execute(""" SELECT songs FROM {0}""".format(self.tablename))
        # fetch the result
        rows = cursor.fetchall()
        # commit the change to database
        self.conn.commit()
        return rows
| 33.493827 | 109 | 0.530041 |
5ef2ffa79f5a4c238d300e43efae7f9f5a637a70 | 2,489 | py | Python | modules/extrec/pyrecon_nmap.py | OSSSP/pyrecon | 8a73fdfcafe59a7e7aa63a0a6839f40931067200 | [
"MIT"
] | 2 | 2019-01-21T14:58:21.000Z | 2021-03-15T07:41:13.000Z | modules/extrec/pyrecon_nmap.py | sandwichi/pyrecon | 6c02cc357e67e49409c1138c888c4ec5a33aafa2 | [
"MIT"
] | 6 | 2021-01-20T07:23:25.000Z | 2021-06-25T15:41:46.000Z | modules/extrec/pyrecon_nmap.py | OSSSP/pyrecon | 8a73fdfcafe59a7e7aa63a0a6839f40931067200 | [
"MIT"
] | 2 | 2019-01-21T14:33:13.000Z | 2020-12-05T20:46:57.000Z | #!/usr/bin/bash
import os
import subprocess
from modules.lib.colors import colors
def pyrecon_nmap(nmap_directory, output_directory):
    """Run nmap against previously discovered subnets/ports, then convert
    the results to HTML (xsltproc) and CSV (nmaptocsv).

    :param nmap_directory: directory where nmap output files are written
    :param output_directory: recon root containing subnets.txt and the
        external_recon/portscan/open_ports.txt port list
    :raises FileExistsError: if a non-empty nmap.csv already exists
    """
    target_subnet_file = os.path.join(output_directory, 'subnets.txt')
    nmap_input = os.path.join(output_directory, 'external_recon/portscan/open_ports.txt')
    nmap_output = os.path.join(nmap_directory, 'nmap')
    # Nothing to scan if the port-scan stage produced no open ports.
    if not os.path.isfile(nmap_input):
        return
    # Refuse to clobber an existing, non-empty result set.
    if os.path.isfile(os.path.join(nmap_directory, 'nmap.csv')) and os.stat(os.path.join(nmap_directory, 'nmap.csv')).st_size > 0:
        raise FileExistsError
    with open(target_subnet_file) as target_subnet_file_readlines:
        cidrs = target_subnet_file_readlines.readlines()
    number_cidr_nets = len(cidrs)
    with open(nmap_input, 'r') as nmap_ports_file:
        # Get port list and strip last comma
        nmap_ports = nmap_ports_file.read().replace('\n', ',')[:-1]
    # Announce the targets, then run one nmap SYN scan (-sS) with service
    # and OS detection (-A) over all of them via -iL.
    if number_cidr_nets > 1:
        print(colors.YELLOW + '[*] Running nmap on {0} hosts/CIDR nets:'.format(number_cidr_nets) + colors.RESET)
        for cidr in cidrs[:-1]:
            print('\t{0}'.format(cidr.rstrip('\n')))
        print('\t{0}\n'.format(cidrs[-1].rstrip('\n')))
        subprocess.call(["nmap", "-sS", "-Pn", "-v", "-A", "-p", nmap_ports, "-oA", nmap_output, "-iL", target_subnet_file])
    elif number_cidr_nets == 1:
        with open(target_subnet_file) as target_subnet_file_read:
            cidr = target_subnet_file_read.read()
        print(colors.YELLOW + '[*] Running nmap on {0} host/CIDR net:'.format(number_cidr_nets) + colors.RESET)
        print('\t{0}'.format(cidr))
        subprocess.call(["nmap", "-sS", "-Pn", "-v", "-A", "-p", nmap_ports, "-oA", nmap_output, "-iL", target_subnet_file])
    # Convert nmap.xml to nmap.html with xsltproc
    nmap_xml_output = os.path.join(os.path.abspath(nmap_directory), 'nmap.xml')
    nmap_html_output = os.path.join(os.path.abspath(nmap_directory), 'nmap.html')
    subprocess.call(["xsltproc", nmap_xml_output, "-o", nmap_html_output])
    """
    Convert nmap.gnmap output to nmap.csv for spreadsheet imports:
    This is done by making a system call to nmaptocsv which is placed in the path after setup.sh
    nmaptocsv.py author's github:
    https://github.com/maaaaz/nmaptocsv
    """
    nmap_greppable_output = os.path.join(nmap_directory, 'nmap.gnmap')
    nmaptocsv_output = os.path.join(nmap_directory, 'nmap.csv')
    subprocess.call(["nmaptocsv", "-i", nmap_greppable_output, "-o", nmaptocsv_output, "-f", "ip-fqdn-os-protocol-port-service", "-d", ","])
    print(colors.GREEN + '\n[+] Done. Nmap outputs saved to {0}\n'.format(nmap_directory) + colors.RESET)
dfc9573a946b4a07770f6d9e0d2aca1f62e00385 | 2,954 | py | Python | cnn.py | danielvarga/vat_tf | 0b40b256922b7996558504a5d2c3556b5f9fff15 | [
"MIT"
] | 261 | 2017-04-14T21:17:03.000Z | 2022-03-18T17:32:19.000Z | cnn.py | danielvarga/vat_tf | 0b40b256922b7996558504a5d2c3556b5f9fff15 | [
"MIT"
] | 17 | 2017-05-03T08:44:24.000Z | 2019-12-14T19:30:40.000Z | cnn.py | danielvarga/vat_tf | 0b40b256922b7996558504a5d2c3556b5f9fff15 | [
"MIT"
] | 98 | 2017-04-15T06:36:08.000Z | 2022-02-07T13:56:14.000Z | import tensorflow as tf
import numpy
import sys, os
import layers as L
# Command-line hyper-parameters for the classifier defined in logit() below.
FLAGS = tf.app.flags.FLAGS
# Keep probability for dropout between the three conv stacks (applied when stochastic=True).
tf.app.flags.DEFINE_float('keep_prob_hidden', 0.5, "dropout rate")
# Negative-slope coefficient for the leaky-ReLU activations.
tf.app.flags.DEFINE_float('lrelu_a', 0.1, "lrelu slope")
# When True, add a final batch-norm layer on the 10-way logits.
tf.app.flags.DEFINE_boolean('top_bn', False, "")
def logit(x, is_training=True, update_batch_stats=True, stochastic=True, seed=1234):
    """Build the 9-layer conv-net classifier and return its pre-softmax logits.

    Three conv/bn/leaky-ReLU stacks separated by 2x2 max-pooling (plus dropout
    in stochastic mode), then global average pooling and a fully connected layer.

    Args:
        x: input image batch (NHWC, 3 channels).
        is_training: forwarded to batch norm (train vs. inference statistics).
        update_batch_stats: whether batch norm updates its running statistics.
        stochastic: apply dropout between conv stacks when True.
        seed: seeds a private RandomState that supplies per-layer init seeds.

    Returns:
        Logit tensor of shape (batch, 10).
    """
    rng = numpy.random.RandomState(seed)

    def conv_unit(t, ksize, f_in, f_out, idx, padding=None):
        # One conv -> batch-norm -> leaky-ReLU unit; consumes exactly one seed
        # from rng, matching the original per-layer seeding order.
        extra = {} if padding is None else {'padding': padding}
        t = L.conv(t, ksize=ksize, stride=1, f_in=f_in, f_out=f_out,
                   seed=rng.randint(123456), name='c%d' % idx, **extra)
        return L.lrelu(
            L.bn(t, f_out, is_training=is_training,
                 update_batch_stats=update_batch_stats, name='b%d' % idx),
            FLAGS.lrelu_a)

    def maybe_dropout(t):
        # Dropout only in stochastic mode; consumes one seed from rng only when
        # applied (preserves the original rng call sequence).
        if not stochastic:
            return t
        return tf.nn.dropout(t, keep_prob=FLAGS.keep_prob_hidden,
                             seed=rng.randint(123456))

    h = conv_unit(x, 3, 3, 128, 1)
    h = conv_unit(h, 3, 128, 128, 2)
    h = conv_unit(h, 3, 128, 128, 3)
    h = maybe_dropout(L.max_pool(h, ksize=2, stride=2))

    h = conv_unit(h, 3, 128, 256, 4)
    h = conv_unit(h, 3, 256, 256, 5)
    h = conv_unit(h, 3, 256, 256, 6)
    h = maybe_dropout(L.max_pool(h, ksize=2, stride=2))

    h = conv_unit(h, 3, 256, 512, 7, padding="VALID")
    h = conv_unit(h, 1, 512, 256, 8)
    h = conv_unit(h, 1, 256, 128, 9)

    h = tf.reduce_mean(h, reduction_indices=[1, 2])  # Global average pooling
    h = L.fc(h, 128, 10, seed=rng.randint(123456), name='fc')
    if FLAGS.top_bn:
        h = L.bn(h, 10, is_training=is_training,
                 update_batch_stats=update_batch_stats, name='bfc')
    return h
| 56.807692 | 119 | 0.698037 |
13cdc3f7e3fc470276f67792bc3b7305517ec727 | 469 | py | Python | deal/linter/_template.py | m4ta1l/deal | 2a8e9bf412b8635b00a2b798dd8802375814a1c8 | [
"MIT"
] | 1 | 2020-09-05T13:54:16.000Z | 2020-09-05T13:54:16.000Z | deal/linter/_template.py | m4ta1l/deal | 2a8e9bf412b8635b00a2b798dd8802375814a1c8 | [
"MIT"
] | 7 | 2020-09-05T13:54:28.000Z | 2020-11-27T05:59:19.000Z | deal/linter/_template.py | Smirenost/deal | 2a8e9bf412b8635b00a2b798dd8802375814a1c8 | [
"MIT"
] | null | null | null | # This file is excluded from coverage.
# project
from deal import ContractError
from deal._decorators.base import Base
# will be filled from the linter
contract = ...
func = ...
base = Base(validator=contract) # type: ignore
if func is not Ellipsis:
base.function = func
try:
base.validate(*args, **kwargs) # type: ignore # noqa: F821
except ContractError as exc:
result = False
if exc.args:
result = exc.args[0]
else:
result = True
| 19.541667 | 64 | 0.680171 |
cc3eb07c43cf4ac4554df122ce306a3bf57c2782 | 5,490 | py | Python | baselines/acktr/acktr_cont.py | speedcell4/baselines | c4be964fad7d015d1aa2f76a946c7c8c1025ce61 | [
"MIT"
] | null | null | null | baselines/acktr/acktr_cont.py | speedcell4/baselines | c4be964fad7d015d1aa2f76a946c7c8c1025ce61 | [
"MIT"
] | null | null | null | baselines/acktr/acktr_cont.py | speedcell4/baselines | c4be964fad7d015d1aa2f76a946c7c8c1025ce61 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from baselines import logger
import baselines.common as common
from baselines.common import tf_util as U
from baselines.acktr import kfac
from baselines.common.filters import ZFilter
def pathlength(path):
    """Number of timesteps in a rollout, i.e. the length of its reward array."""
    return len(path["reward"])
def rollout(env, policy, max_pathlength, animate=False, obfilter=None):
    """Run `policy` in `env` for at most `max_pathlength` steps and record the path.

    Each state fed to the policy is the concatenation of the current and
    previous (optionally filtered) observations. Raw actions in [-1, 1] are
    rescaled into the env's action range before stepping.

    Returns:
        dict with keys: observation, terminated, reward, action, action_dist,
        logp (arrays stacked over time).
    """
    ob = env.reset()
    prev_ob = np.float32(np.zeros(ob.shape))
    if obfilter:
        ob = obfilter(ob)

    states, actions, dists, logprobs, rews = [], [], [], [], []
    terminated = False
    for _ in range(max_pathlength):
        if animate:
            env.render()
        state = np.concatenate([ob, prev_ob], -1)
        states.append(state)
        ac, ac_dist, logp = policy.act(state)
        actions.append(ac)
        dists.append(ac_dist)
        logprobs.append(logp)
        prev_ob = np.copy(ob)
        # Map the action from [-1, 1] into the env's action range, then clip.
        low, high = env.action_space.low, env.action_space.high
        scaled_ac = np.clip(low + (ac + 1.) * 0.5 * (high - low), low, high)
        ob, rew, done, _ = env.step(scaled_ac)
        if obfilter:
            ob = obfilter(ob)
        rews.append(rew)
        if done:
            terminated = True
            break
    return {"observation": np.array(states), "terminated": terminated,
            "reward": np.array(rews), "action": np.array(actions),
            "action_dist": np.array(dists), "logp": np.array(logprobs)}
def learn(env, policy, vf, gamma, lam, timesteps_per_batch, num_timesteps,
          animate=False, callback=None, desired_kl=0.002):
    """Train `policy` and value function `vf` on `env` with ACKTR.

    Repeatedly collects rollouts, fits the value function, computes GAE
    advantages, applies a KFAC natural-gradient policy update, and adapts the
    step size to keep the per-update KL near `desired_kl`.

    Args:
        env: gym-style environment.
        policy: policy with `update_info`, `wd_dict`, `act`, `compute_kl`.
        vf: value function with `fit`, `predict` and a `q_runner`.
        gamma: discount factor.
        lam: GAE lambda.
        timesteps_per_batch: minimum env steps collected per iteration.
        num_timesteps: total env steps before training stops.
        animate: render the first rollout every 10th iteration.
        callback: optional callable invoked at the end of each iteration.
        desired_kl: target KL divergence used to adapt the step size.
    """
    obfilter = ZFilter(env.observation_space.shape)
    max_pathlength = env.spec.timestep_limit
    # Step size is a TF variable so it can be adapted in-graph below.
    stepsize = tf.Variable(initial_value=np.float32(np.array(0.03)), name='stepsize')
    inputs, loss, loss_sampled = policy.update_info
    optim = kfac.KfacOptimizer(learning_rate=stepsize, cold_lr=stepsize * (1 - 0.9), momentum=0.9, kfac_update=2, \
                               epsilon=1e-2, stats_decay=0.99, async_=1, cold_iter=1,
                               weight_decay_dict=policy.wd_dict, max_grad_norm=None)
    # Only optimize the policy ("pi") variables with KFAC.
    pi_var_list = []
    for var in tf.trainable_variables():
        if "pi" in var.name:
            pi_var_list.append(var)
    update_op, q_runner = optim.minimize(loss, loss_sampled, var_list=pi_var_list)
    do_update = U.function(inputs, update_op)
    U.initialize()
    # start queue runners
    enqueue_threads = []
    coord = tf.train.Coordinator()
    for qr in [q_runner, vf.q_runner]:
        assert (qr != None)
        enqueue_threads.extend(qr.create_threads(tf.get_default_session(), coord=coord, start=True))
    i = 0
    timesteps_so_far = 0
    while True:
        if timesteps_so_far > num_timesteps:
            break
        logger.log("********** Iteration %i ************" % i)
        # Collect paths until we have enough timesteps
        timesteps_this_batch = 0
        paths = []
        while True:
            path = rollout(env, policy, max_pathlength, animate=(len(paths) == 0 and (i % 10 == 0) and animate),
                           obfilter=obfilter)
            paths.append(path)
            n = pathlength(path)
            timesteps_this_batch += n
            timesteps_so_far += n
            if timesteps_this_batch > timesteps_per_batch:
                break
        # Estimate advantage function
        vtargs = []
        advs = []
        for path in paths:
            rew_t = path["reward"]
            return_t = common.discount(rew_t, gamma)
            vtargs.append(return_t)
            vpred_t = vf.predict(path)
            # Bootstrap with the last value prediction unless the episode ended.
            vpred_t = np.append(vpred_t, 0.0 if path["terminated"] else vpred_t[-1])
            delta_t = rew_t + gamma * vpred_t[1:] - vpred_t[:-1]
            adv_t = common.discount(delta_t, gamma * lam)
            advs.append(adv_t)
        # Update value function
        vf.fit(paths, vtargs)
        # Build arrays for policy update
        ob_no = np.concatenate([path["observation"] for path in paths])
        action_na = np.concatenate([path["action"] for path in paths])
        oldac_dist = np.concatenate([path["action_dist"] for path in paths])
        adv_n = np.concatenate(advs)
        standardized_adv_n = (adv_n - adv_n.mean()) / (adv_n.std() + 1e-8)
        # Policy update
        do_update(ob_no, action_na, standardized_adv_n)
        min_stepsize = np.float32(1e-8)
        max_stepsize = np.float32(1e0)
        # Adjust stepsize: shrink when KL overshoots 2x target, grow when
        # under half the target (clamped to [min_stepsize, max_stepsize]).
        kl = policy.compute_kl(ob_no, oldac_dist)
        if kl > desired_kl * 2:
            logger.log("kl too high")
            tf.assign(stepsize, tf.maximum(min_stepsize, stepsize / 1.5)).eval()
        elif kl < desired_kl / 2:
            logger.log("kl too low")
            tf.assign(stepsize, tf.minimum(max_stepsize, stepsize * 1.5)).eval()
        else:
            logger.log("kl just right!")
        logger.record_tabular("EpRewMean", np.mean([path["reward"].sum() for path in paths]))
        logger.record_tabular("EpRewSEM", np.std([path["reward"].sum() / np.sqrt(len(paths)) for path in paths]))
        logger.record_tabular("EpLenMean", np.mean([pathlength(path) for path in paths]))
        logger.record_tabular("KL", kl)
        if callback:
            callback()
        logger.dump_tabular()
        i += 1
    coord.request_stop()
    coord.join(enqueue_threads)
183b323da24cbc13d8659212c3b5bf2eaf92fb94 | 276 | py | Python | tests/artificial/transf_Integration/trend_ConstantTrend/cycle_5/ar_12/test_artificial_1024_Integration_ConstantTrend_5_12_0.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/artificial/transf_Integration/trend_ConstantTrend/cycle_5/ar_12/test_artificial_1024_Integration_ConstantTrend_5_12_0.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/artificial/transf_Integration/trend_ConstantTrend/cycle_5/ar_12/test_artificial_1024_Integration_ConstantTrend_5_12_0.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 5, transform = "Integration", sigma = 0.0, exog_count = 0, ar_order = 12); | 39.428571 | 171 | 0.73913 |
a359660a606987086ece20e70d145d2a3b41b1ce | 14,085 | py | Python | daily.py | antoniomdk/daily-celtiberian | 8ab61a9535a5543cc1c7f947e474a7eb4bd8e030 | [
"MIT"
] | 4 | 2019-10-14T18:03:40.000Z | 2022-03-17T11:34:11.000Z | daily.py | antoniomdk/daily-celtiberian | 8ab61a9535a5543cc1c7f947e474a7eb4bd8e030 | [
"MIT"
] | 1 | 2019-10-16T16:24:44.000Z | 2019-10-16T16:24:44.000Z | daily.py | antoniomdk/daily-celtiberian | 8ab61a9535a5543cc1c7f947e474a7eb4bd8e030 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import re
import pickle
import glob
import os.path
import subprocess
import json
import arrow
import click
from functools import cmp_to_key
from itertools import groupby
from halo import Halo
from dateutil import parser
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import gspread
import slack
import site
# OAuth scopes requested for the Google Sheets/Drive client.
SHEETS_SCOPES = [
    'https://www.googleapis.com/auth/drive',
    'https://www.googleapis.com/auth/spreadsheets',
]
# Per-user config directory and the files stored inside it.
CONFIG_DIR = os.path.join(os.path.expanduser('~'), '.daily')
USER_CONFIG_PATH = os.path.join(CONFIG_DIR, 'user.json')
SHEETS_TOKEN_PATH = os.path.join(CONFIG_DIR, 'gsheet_token')
# OAuth client secret shipped with the package (installed under site.USER_BASE).
SHEETS_CLIENT_SECRET_PATH = os.path.join(site.USER_BASE, __name__, 'client_sheets.json')
SLACK_INFO_PROMPT = '''
Create a Slack app for your workspace and get the API token in the following page:
https://api.slack.com/custom-integrations/legacy-tokens
'''
# Shared StriveLabs timesheet identifiers.
STRIVELABS_TIMESHEET = 'TimeSheet StriveLabs'
STRIVELABS_PROJECT_NAME = 'StriveLabs'
# Static editor template shipped with the package.
TEMPLATE_FILE = os.path.join(site.USER_BASE, __name__, 'template.txt')
# Help strings for the `write` command's options.
WRITE_HELP = {
    "project": "Project name (previously configured)",
    "slack": "Enable/Disable writing on Slack",
    "timesheet": "Enable/Disable writing on any timesheet",
    "strivelabs": "Enable/Disable writing on Strivelabs timesheet"
}
class ParsingError(Exception):
    """Raised when the edited daily message cannot be parsed (e.g. bad working hours)."""
    pass
def create_git_log_command(since='midnight'):
    """Return the shell command listing commit subjects by the current git user since `since`."""
    return (
        'git log --no-merges --since={since} --format="%s" '
        '--author="$(git config user.name)"'
    ).format(since=since)
def get_commits():
    """Return today's commit subjects from the current repo, or [] when git fails."""
    completed = subprocess.run(
        create_git_log_command(), shell=True, capture_output=True, text=True)
    if completed.returncode:
        return []
    # Drop quoting artifacts and empty lines from the git output.
    return [line for line in completed.stdout.replace('"', '').split('\n') if line]
def create_message_template():
    """Build the editor template: the static template file plus commented-out
    suggestions generated from today's git commits.

    Returns:
        The template text; when TEMPLATE_FILE is missing, just the suggestions.
    """
    commits = get_commits()
    suggestions = '\n'.join(f'# - {commit}' for commit in commits)
    try:
        with open(TEMPLATE_FILE) as template:
            return template.read() + '\n' + suggestions
    except FileNotFoundError:
        # Bug fix: the original had an unreachable `return suggestions` after
        # the `with` block, so a missing template file crashed instead of
        # falling back to the commit suggestions.
        return suggestions
def validate_working_hours(hours):
    """True iff every entry is a 24-hour H:MM / HH:MM timestamp (00:00-23:59)."""
    pattern = re.compile(r'^(2[0-3]|[01]?[0-9]):([0-5]?[0-9])$')
    return all(pattern.match(timestamp) for timestamp in hours)
def parse_working_hours(line):
    """Parse a comma-separated list of start/stop times into 'H:MM' strings.

    Bare hours (e.g. '9') are normalized to '9:00'.

    Raises:
        ParsingError: when the count is odd (unpaired start/stop) or any
            timestamp is invalid.
    """
    tokens = line.replace(' ', '').replace('\n', '').split(',')
    normalized = [tok if ':' in tok else f'{tok}:00' for tok in tokens]
    if len(normalized) % 2 or not validate_working_hours(normalized):
        raise ParsingError('Invalid working hours')
    return normalized
def open_editor(contents=None):
    """Open the user's editor and return the edited text (None when aborted).

    Args:
        contents: Initial buffer text. When falsy, a template with commit
            suggestions is generated.

    Bug fix: the original never used `contents` — the editor was only opened
    with the generated template, and supplying text referenced an undefined
    variable / returned None. Now the provided text is edited.
    """
    if not contents:
        contents = create_message_template()
    return click.edit(contents, extension='')
def preprocess_file(content):
    """Split `content` into stripped lines, dropping blanks and '#' comment lines."""
    return [
        stripped
        for stripped in (line.strip() for line in content.split('\n'))
        if stripped and not stripped.startswith('#')
    ]
def create_daily_message(preprocessed_lines, date_format='Today'):
    """Format the daily-scrum Slack message.

    The header is the literal `date_format` when it equals 'Today'
    (case-insensitive), otherwise today's date rendered with that arrow format.
    Body lines starting with '*' become tab-indented sub-items, '-' lines are
    kept as-is, anything else is prefixed with '-'.
    """
    now = arrow.utcnow()
    use_date_header = date_format.upper() != 'TODAY'
    header = now.format(date_format) if use_date_header else date_format
    parts = []
    for line in preprocessed_lines[1:]:
        if line.startswith('*'):
            parts.append(f'\n\t{line}')
        elif line.startswith('-'):
            parts.append(f'\n{line}')
        else:
            parts.append(f'\n-{line}')
    return f'{header}:' + ''.join(parts)
def create_description_for_timesheet(preprocessed_lines):
    """Join body lines (skipping the hours header and '*' notes) into one
    dot-separated sentence, stripping leading markers and periods."""
    fragments = (
        line[1:].replace('.', '')
        for line in preprocessed_lines[1:]
        if not line.startswith('*')
    )
    return '. '.join(fragments).strip()
def get_credentials():
    """Load (or interactively obtain) Google Sheets OAuth credentials.

    Credentials are pickled under the config dir; expired ones are refreshed
    when a refresh token is available, otherwise a local OAuth flow runs.
    """
    os.makedirs(CONFIG_DIR, exist_ok=True)
    creds = None
    if os.path.exists(SHEETS_TOKEN_PATH):
        with open(SHEETS_TOKEN_PATH, 'rb') as token:
            creds = pickle.load(token)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                SHEETS_CLIENT_SECRET_PATH, SHEETS_SCOPES)
            creds = flow.run_local_server(port=0)
        with open(SHEETS_TOKEN_PATH, 'wb') as token:
            pickle.dump(creds, token)
    # Mirror token onto the legacy attribute name — presumably expected by
    # gspread's authorize(); TODO confirm still required.
    creds.access_token = creds.token
    return creds
def generate_project_config_filepath(project_name):
    """Return the JSON config path for a project, derived from its name.

    Spaces become dashes, e.g. 'My Proj' -> '<config_dir>/My-Proj.json'.

    Bug fix: the f-string previously hard-coded the literal '(unknown).json'
    instead of interpolating the sanitized name, so every project mapped to
    the same config file.
    """
    filename = project_name.strip().replace(' ', '-')
    return os.path.join(CONFIG_DIR, f'{filename}.json')
def get_project_config(project_name):
    """Load a project's JSON config, or None when no config file exists."""
    config_path = generate_project_config_filepath(project_name)
    try:
        with open(config_path, 'r') as config_file:
            return json.load(config_file)
    except FileNotFoundError:
        return None
def get_strivelabs_project_config(project_name):
    """Return the shared StriveLabs timesheet config, tagged with `project_name`.

    Raises:
        FileNotFoundError: when the StriveLabs config was never created.
            Previously a missing config crashed with an opaque
            `TypeError: 'NoneType' object does not support item assignment`.
    """
    result = get_project_config(STRIVELABS_PROJECT_NAME)
    if result is None:
        raise FileNotFoundError(
            'StriveLabs config not found. Run "daily init" first.')
    result["name"] = project_name
    return result
def get_user_config():
    """Load the global user config; exit(1) with a hint when it doesn't exist."""
    try:
        with open(USER_CONFIG_PATH, 'r') as config_file:
            raw = config_file.read()
    except FileNotFoundError:
        print('User config not found. Please, you need to run daily init first')
        exit(1)
    return json.loads(raw)
def post_message_on_slack(message, project_config):
    """Post the daily message to the project's Slack channel, with a spinner.

    Best-effort: failures never raise, they are reported via the spinner.
    """
    with Halo(text='Writing on Slack', spinner='dots') as spinner:
        error = None
        try:
            slack_client = slack.WebClient(token=project_config['slack_token'])
            response = slack_client.chat_postMessage(
                channel=project_config['channel'], text=message)
            if not response['ok'] or not response['message']['text'] == message:
                error = response
        except Exception as e:
            # Bug fix: `e.args[1]` raised IndexError for exceptions carrying
            # fewer than two args, masking the real failure. Keep the old
            # detail when available, otherwise fall back to the exception text.
            error = e.args[1] if len(e.args) > 1 else str(e)
        if error:
            spinner.fail(str(error))
        else:
            spinner.succeed('Writed on Slack')
def is_url(string):
    """True when the string starts with an http(s):// or ftp(s):// scheme."""
    return re.match(r'^(?:http|ftp)s?://', string) is not None
def parse_date(string):
    """Parse an arbitrary date string into an arrow datetime floored to midnight.

    Returns None for anything dateutil cannot parse (e.g. blank cells).
    """
    try:
        return arrow.get(parser.parse(string)).floor('day')
    except Exception:
        return None
def find_cells_to_write(worksheet):
    """Locate today's row in the timesheet worksheet.

    Returns:
        Tuple of (the six hour cells to the right of the Date column on
        today's row, (row, description column, current description text)).

    Raises:
        ValueError: when the sheet has no row dated today. Previously the
            sentinel row -1 was silently passed to worksheet.range, producing
            an opaque API error.
    """
    date_cell = worksheet.find(re.compile('date', re.IGNORECASE))
    description_cell = worksheet.find(re.compile('description', re.IGNORECASE))
    dates_column = worksheet.col_values(date_cell.col)
    today = arrow.utcnow().floor('day')
    today_row = -1
    for i, raw_date in enumerate(dates_column):
        if parse_date(raw_date) == today:
            today_row = i + 1  # gspread rows are 1-indexed
            break
    if today_row == -1:
        raise ValueError('No row found for today in the timesheet')
    hours_cells = worksheet.range(today_row, date_cell.col + 1, today_row,
                                  date_cell.col + 6)
    return hours_cells, (today_row, description_cell.col, description_cell.value)
def parse_working_hour(hour):
    """Split an 'H:MM' string into an (hour, minute) pair of ints."""
    hours, minutes = hour.split(':')
    return int(hours), int(minutes)
def hours_comparator(hour1, hour2):
    """Three-way comparison of two 'H:MM' strings (for cmp_to_key): 1, 0 or -1."""
    first = parse_working_hour(hour1)
    second = parse_working_hour(hour2)
    # (hour, minute) tuples compare lexicographically: hour first, then minute.
    if first == second:
        return 0
    return 1 if first > second else -1
def compute_total_hours(working_hours):
    """Sum consecutive (start, stop) time pairs into a total (hours, minutes) tuple."""
    total_minutes = 0
    for start, stop in zip(working_hours[::2], working_hours[1::2]):
        start_h, start_m = parse_working_hour(start)
        stop_h, stop_m = parse_working_hour(stop)
        total_minutes += (stop_h - start_h) * 60 + (stop_m - start_m)
    return divmod(total_minutes, 60)
def remove_consecutive_duplicates(l):
    """Keep only elements that do NOT appear in a consecutive run of length 2+.

    Note: despite the name, repeated runs are dropped entirely rather than
    collapsed to one occurrence (e.g. [1, 1, 2] -> [2]).
    """
    return [value for value, run in groupby(l) if sum(1 for _ in run) == 1]
def update_hours_cells(worksheet, cells, working_hours):
    """Merge new working hours into today's hour cells and push them to the sheet.

    Existing non-blank values and the new hours are sorted chronologically;
    values occurring twice in a row (hours already recorded) are dropped.
    """
    existing = [cell.value for cell in cells if cell.value.strip()]
    merged = remove_consecutive_duplicates(
        sorted(existing + working_hours, key=cmp_to_key(hours_comparator)))
    for cell, hour in zip(cells, merged):
        cell.value = hour
    worksheet.update_cells(cells, 'USER_ENTERED')
def update_description(project_config, worksheet, description_cell, new_description):
    """Write the day's description into the timesheet cell.

    On the shared StriveLabs sheet with multiple projects, the new text is
    appended after the existing cell content (prefixed with the project name)
    unless it is already present; otherwise the cell is simply overwritten.
    """
    user_config = get_user_config()
    row, col, current_text = description_cell
    on_strivelabs = project_config['timesheet'] == STRIVELABS_TIMESHEET
    already_present = new_description in current_text
    if on_strivelabs and user_config["num_projects"] > 1 and not already_present:
        value = f'({project_config["name"]}) {current_text} {new_description}'
    else:
        value = new_description
    worksheet.update_cell(row, col, value)
def update_timesheet(credentials, working_hours, description, project_config):
    """Write today's hours and description into the project's Google Sheet.

    Best-effort: failures are reported through the spinner rather than raised.
    """
    timesheet = project_config['timesheet']
    with Halo(text=f'Writing on {timesheet}', spinner='dots') as spinner:
        try:
            gc = gspread.authorize(credentials)
            # The timesheet may be referenced by URL or by document title.
            sheet = gc.open_by_url(timesheet) if is_url(timesheet) else gc.open(timesheet)
            worksheet = sheet.worksheet(project_config['timesheet_page'])
            hours_cells, description_cell = find_cells_to_write(worksheet)
            update_hours_cells(worksheet, hours_cells, working_hours)
            update_description(project_config, worksheet, description_cell, description)
            spinner.succeed(f'Writed on {timesheet}')
        except Exception as e:
            # Bug fix: `e.args[1]` raised IndexError for exceptions with fewer
            # than two args (e.g. gspread.SpreadsheetNotFound), hiding the
            # actual error. Report the exception itself instead.
            spinner.fail(str(e))
def create_strivelabs_config(name):
    """Persist the shared StriveLabs timesheet config, pointing at the user's page."""
    config_file = generate_project_config_filepath(STRIVELABS_PROJECT_NAME)
    with open(config_file, 'w') as json_file:
        json.dump(
            {
                'name': STRIVELABS_PROJECT_NAME,
                'timesheet': STRIVELABS_TIMESHEET,
                'timesheet_page': name,
            },
            json_file,
        )
def extract_data(content, project_config):
    """Parse the edited buffer into (daily_message, working_hours, description).

    Returns None when parsing fails; the caller treats that as invalid input.
    """
    try:
        preprocessed_lines = preprocess_file(content)
        date_format = project_config['date_format']
        daily_message = create_daily_message(preprocessed_lines, date_format)
        description = create_description_for_timesheet(preprocessed_lines)
        working_hours = parse_working_hours(preprocessed_lines[0])
        return daily_message, working_hours, description
    except ParsingError as e:
        print(e)
    except Exception as e:
        # Bug fix: the original bare `pass` silently swallowed every parsing
        # error. Surface the reason while preserving the best-effort None
        # return that the caller expects.
        print(f'Could not parse input: {e}')
# Root click group: all subcommands (init, create-project, write, list,
# commits) attach here. Intentionally no docstring so `--help` stays terse.
@click.group()
def cli():
    pass
@cli.command()
def init():
    """Initialize the user config data.
    It will asks for Google Sheet authorization"""
    # Triggers the OAuth flow now so later commands find cached credentials.
    get_credentials()
    is_strivelabs = click.confirm('Do you belong to Strivelabs?')
    user_config = {'is_strivelabs': is_strivelabs}
    if is_strivelabs:
        # StriveLabs members share one timesheet; the page is named after them.
        name = click.prompt('Your name on Strivelabs timesheet')
        create_strivelabs_config(name)
        user_config['name'] = name
    # Incremented later by `create_project` for each project added.
    user_config['num_projects'] = 0
    with open(USER_CONFIG_PATH, 'w') as json_file:
        json.dump(user_config, json_file)
@cli.command()
def create_project():
    """Creates a new project"""
    user_config = get_user_config()
    name = click.prompt(
        'Project Name (will be used to identify this project)')
    # The Google Sheet can be referenced by title or full URL.
    timesheet = click.prompt('Timesheet name or url')
    timesheet_page = click.prompt(
        'Timesheet page name', default=user_config.get('name', ''))
    # Arrow format for the message header; the literal 'Today' skips the date.
    date_format = click.prompt(
        'Daily date format (empty for no date)', default='Today')
    print(SLACK_INFO_PROMPT)
    slack_token = click.prompt('Slack API token')
    daily_channel = click.prompt(
        'Slack channel name', default='#daily-scrum')
    config = {
        'name': name,
        'date_format': date_format,
        'slack_token': slack_token,
        'channel': daily_channel,
        'timesheet': timesheet,
        'timesheet_page': timesheet_page
    }
    config_file = generate_project_config_filepath(name)
    # Track the project count: descriptions on the shared StriveLabs sheet are
    # prefixed with the project name only when more than one project exists.
    user_config['num_projects'] = user_config['num_projects'] + 1
    with open(config_file, 'w') as file:
        json.dump(config, file)
    with open(USER_CONFIG_PATH, 'w') as file:
        json.dump(user_config, file)
@cli.command()
@click.argument('project')
@click.option('--slack/--no-slack', default=True, help=WRITE_HELP["slack"])
@click.option('--timesheet/--no-timesheet', default=True, help=WRITE_HELP["timesheet"])
@click.option('--strivelabs/--no-strivelabs', default=True, help=WRITE_HELP["strivelabs"])
def write(project, slack, timesheet, strivelabs):
    """
    Writes for a project.
    It opens your default editor and writes your #daily-scrum message on
    Slack and your working hours on Google Sheets.
    """
    # NOTE(review): the `slack` parameter shadows the module-level `slack`
    # import inside this function; harmless here since the module is not used
    # directly, but worth renaming.
    user_config = get_user_config()
    project_config = get_project_config(project)
    if not project_config:
        print('\nProject not exists. Aborting...')
        return
    credentials = get_credentials()
    # Returns None when the user closes the editor without saving.
    content = open_editor()
    if not content:
        print('\nAborted')
        return
    data = extract_data(content, project_config)
    if not data:
        print('Invalid format. Aborting...')
        return
    daily_message, working_hours, description = data
    total_working_hours = compute_total_hours(working_hours)
    print(f'Total working hours: {total_working_hours[0]}:{total_working_hours[1]}')
    if slack:
        post_message_on_slack(daily_message, project_config)
    if timesheet:
        update_timesheet(credentials, working_hours, description,
                         project_config)
    # StriveLabs members additionally mirror the entry to the shared timesheet.
    if user_config['is_strivelabs'] and strivelabs:
        strivelabs_config = get_strivelabs_project_config(project)
        update_timesheet(credentials, working_hours, description, strivelabs_config)
@cli.command(name="list")
def list_projects():
    """Lists all your projects"""
    # Internal config files that should not show up as projects.
    hidden = {SHEETS_TOKEN_PATH, SHEETS_CLIENT_SECRET_PATH, USER_CONFIG_PATH}
    for config_path in glob.glob(os.path.join(CONFIG_DIR, '*.json')):
        if config_path in hidden:
            continue
        project = os.path.splitext(os.path.basename(config_path))[0]
        if project != STRIVELABS_PROJECT_NAME:
            print(project)
@cli.command(name="commits")
def list_commits():
    # Show today's commit subjects, one per line (a single blank line when
    # there are none — same as joining an empty list).
    print(*get_commits(), sep='\n')
# NOTE(review): runs at import time; consider guarding with
# `if __name__ == '__main__':` so the module can be imported without side effects.
cli()
| 33.858173 | 98 | 0.67547 |
5781ba02c2627186d61095a8b6f004985a7e66a3 | 24,110 | py | Python | cromwell_tools/cromwell_api.py | kgalens/cromwell-tools | 23c756a6c25c67ad4ccfacc6f8cf459bcacdac5f | [
"BSD-3-Clause"
] | null | null | null | cromwell_tools/cromwell_api.py | kgalens/cromwell-tools | 23c756a6c25c67ad4ccfacc6f8cf459bcacdac5f | [
"BSD-3-Clause"
] | null | null | null | cromwell_tools/cromwell_api.py | kgalens/cromwell-tools | 23c756a6c25c67ad4ccfacc6f8cf459bcacdac5f | [
"BSD-3-Clause"
] | null | null | null | """
TODO: add some module docs
TODO: once switched to support only Py3.7+, replace all 'cls'
type annotations with the actual Types, rather than using the strings.
This in Py3.6(-) is limited by the lack of Postponed Evaluation of Annotations, see:
https://www.python.org/dev/peps/pep-0563/
"""
import time
import io
import json
import logging
import requests
from datetime import datetime, timedelta
from cromwell_tools.cromwell_auth import CromwellAuth
from cromwell_tools import utilities
from cromwell_tools.utilities import validate_cromwell_label
from cromwell_tools import exceptions
from typing import List, Union, Dict
logger = logging.getLogger(__name__)
# Workflow statuses treated as terminal failures by `CromwellAPI.wait`.
_failed_statuses = ('Failed', 'Aborted', 'Aborting')
# Query keys accepted by the Cromwell /query endpoint — presumably split into
# at-most-once ("exclusive") vs. repeatable ("inclusive") keys; TODO confirm
# against the Cromwell REST API spec.
_cromwell_exclusive_query_keys = {
    'end',
    'includeSubworkflows',
    'start',
    'submission',
    'page',
    'pageSize',
}
_cromwell_inclusive_query_keys = {
    'additionalQueryResultFields',
    'excludeLabelAnd',
    'excludeLabelOr',
    'id',
    'includeSubworkflows',
    'label',
    'labelor',
    'name',
    'status',
}
# Full set of recognized query keys.
_cromwell_query_keys = _cromwell_exclusive_query_keys.union(
    _cromwell_inclusive_query_keys
)
# TODO: use functools partial for get, post (set the authenticate commands)
class CromwellAPI(object):
"""Contains a set of classmethods that implement interfaces to cromwell REST API endpoints."""
# TODO: move the endpoints definitions to the corresponding functions after refactoring the unit tests and mocks
_abort_endpoint = '/api/workflows/v1/{uuid}/abort'
_status_endpoint = '/api/workflows/v1/{uuid}/status'
_submit_endpoint = '/api/workflows/v1'
_metadata_endpoint = '/api/workflows/v1/{uuid}/metadata'
_health_endpoint = '/engine/v1/status'
_release_hold_endpoint = '/api/workflows/v1/{uuid}/releaseHold'
_query_endpoint = '/api/workflows/v1/query'
_labels_endpoint = '/api/workflows/v1/{uuid}/labels'
@classmethod
def abort(
cls: 'CromwellAPI',
uuid: str,
auth: CromwellAuth,
raise_for_status: bool = False,
) -> requests.Response:
"""Request Cromwell to abort a running workflow by UUID.
Args:
uuid: A Cromwell workflow UUID, which is the workflow identifier.
auth: The authentication class holding headers or auth
information to a Cromwell server.
raise_for_status: Whether to check and raise for status based on the response.
Raises:
requests.exceptions.HTTPError: This will be raised when raise_for_status is True and Cromwell returns
a response that satisfies 400 <= response.status_code < 600.
Returns:
HTTP response from Cromwell.
"""
response = requests.post(
url=auth.url + cls._abort_endpoint.format(uuid=uuid),
auth=auth.auth,
headers=auth.header,
)
if raise_for_status:
cls._check_and_raise_status(response)
return response
@classmethod
def metadata(
cls: 'CromwellAPI',
uuid: str,
auth: CromwellAuth,
includeKey: Union[List[str], str] = None,
excludeKey: Union[List[str], str] = None,
expandSubWorkflows: bool = False,
raise_for_status: bool = False,
) -> requests.Response:
"""Retrieve the workflow and call-level metadata for a specified workflow by UUID.
Args:
uuid: A Cromwell workflow UUID, which is the workflow identifier.
auth: The authentication class holding headers or auth
information to a Cromwell server.
includeKey: When specified key(s) to include from the metadata. Matches any key
starting with the value. May not be used with excludeKey.
excludeKey: When specified key(s) to exclude from the metadata. Matches any key
starting with the value. May not be used with includeKey.
expandSubWorkflows: When true, metadata for sub workflows will be fetched
and inserted automatically in the metadata response.
raise_for_status: Whether to check and raise for status based on the response.
Raises:
requests.exceptions.HTTPError: This will be raised when raise_for_status is True and Cromwell returns
a response that satisfies 400 <= response.status_code < 600.
Returns:
HTTP response from Cromwell.
"""
if excludeKey and includeKey:
raise ValueError('includeKey and excludeKey may not be specified together!')
params = {'expandSubWorkflows': json.dumps(expandSubWorkflows)}
if isinstance(excludeKey, str):
logger.info(f'Adding {excludeKey} to the request parameter list.')
params['excludeKey'] = [excludeKey]
elif isinstance(excludeKey, list) and len(excludeKey) >= 1:
params['excludeKey'] = excludeKey
if isinstance(includeKey, str):
logger.info(f'Adding {includeKey} to the request parameter list.')
params['includeKey'] = [includeKey]
elif isinstance(includeKey, list) and len(includeKey) >= 1:
params['includeKey'] = includeKey
response = requests.get(
url=auth.url + cls._metadata_endpoint.format(uuid=uuid),
auth=auth.auth,
headers=auth.header,
params=params,
)
if raise_for_status:
cls._check_and_raise_status(response)
return response
@classmethod
def status(
cls: 'CromwellAPI',
uuid: str,
auth: CromwellAuth,
raise_for_status: bool = False,
) -> requests.Response:
"""Retrieves the current state for a workflow by UUID.
Args:
uuid: A Cromwell workflow UUID, which is the workflow identifier.
auth: The authentication class holding headers or auth
information to a Cromwell server.
raise_for_status: Whether to check and raise for status based on the response.
Raises:
requests.exceptions.HTTPError: This will be raised when raise_for_status is True and Cromwell returns
a response that satisfies 400 <= response.status_code < 600.
Returns:
HTTP response from Cromwell.
"""
response = requests.get(
url=auth.url + cls._status_endpoint.format(uuid=uuid),
auth=auth.auth,
headers=auth.header,
)
if raise_for_status:
cls._check_and_raise_status(response)
return response
@classmethod
def health(
cls: 'CromwellAPI', auth: CromwellAuth, raise_for_status: bool = False
) -> requests.Response:
"""Return the current health status of any monitored subsystems of the Cromwell Server.
Args:
auth: authentication class holding headers or auth
information to a Cromwell server.
raise_for_status: Whether to check and raise for status based on the response.
Raises:
requests.exceptions.HTTPError: This will be raised when raise_for_status is True and Cromwell returns
a response that satisfies 400 <= response.status_code < 600.
Returns:
HTTP response from Cromwell.
"""
response = requests.get(
url=auth.url + cls._health_endpoint, auth=auth.auth, headers=auth.header
)
if raise_for_status:
cls._check_and_raise_status(response)
return response
@classmethod
def submit(
cls: 'CromwellAPI',
auth: CromwellAuth,
wdl_file: Union[str, io.BytesIO],
inputs_files: Union[List[Union[str, io.BytesIO]], str, io.BytesIO] = None,
options_file: Union[str, io.BytesIO] = None,
dependencies: Union[str, List[str], io.BytesIO] = None,
label_file: Union[str, io.BytesIO] = None,
collection_name: str = None,
on_hold: bool = False,
validate_labels: bool = False,
raise_for_status: bool = False,
) -> requests.Response:
""" Submits a workflow to Cromwell.
Args:
auth: authentication class holding auth information to a
Cromwell server.
wdl_file: The workflow source file to submit for execution. Could be either the
path to the file (str) or the file content in io.BytesIO.
inputs_files: The input data in JSON
format. Could be either the path to the file (str) or the file content in io.BytesIO. This could also
be a list of unlimited input file paths/contents, each of them should have a type of
Union[str, io.BytesIO].
options_file: The Cromwell options file for workflows. Could be either
the path to the file (str) or the file content in io.BytesIO.
dependencies: Workflow dependency files. Could be the path to
the zipped file (str) containing dependencies, a list of paths(List[str]) to all dependency files to be
zipped or a zipped file in io.BytesIO.
label_file: A collection of key/value pairs for workflow labels in JSON
format, could be either the path to the JSON file (str) or the file content in io.BytesIO.
collection_name: Collection in SAM that the workflow should belong to, if use CaaS.
on_hold: Whether to submit the workflow in "On Hold" status.
validate_labels: If True, validate cromwell labels.
raise_for_status: Whether to check and raise for status based on the response.
Raises:
requests.exceptions.HTTPError: This will be raised when raise_for_status is True and Cromwell returns
a response that satisfies 400 <= response.status_code < 600.
Returns:
HTTP response from Cromwell.
"""
submission_manifest = utilities.prepare_workflow_manifest(
wdl_file=wdl_file,
inputs_files=inputs_files,
options_file=options_file,
dependencies=dependencies,
label_file=label_file,
collection_name=collection_name,
on_hold=on_hold,
)
if auth.service_key_content:
submission_manifest[
'workflowOptions'
] = utilities.compose_oauth_options_for_jes_backend_cromwell(
auth, submission_manifest.get('workflowOptions')
)
if validate_labels and label_file is not None:
validate_cromwell_label(submission_manifest['labels'])
response = requests.post(
auth.url + cls._submit_endpoint,
files=submission_manifest,
auth=auth.auth,
headers=auth.header,
)
if raise_for_status:
cls._check_and_raise_status(response)
return response
@classmethod
def wait(
cls: 'CromwellAPI',
workflow_ids: List[str],
auth: CromwellAuth,
timeout_minutes: int = 120,
poll_interval_seconds: int = 30,
verbose: bool = True,
) -> None:
"""Wait until cromwell returns successfully for each provided workflow
Given a list of workflow ids, wait until cromwell returns successfully for each status, or
one of the workflows fails or is aborted.
Args:
workflow_ids: A list of workflow ids to wait for terminal status.
timeout_minutes: Maximum number of minutes to wait.
auth: Authentication class holding headers
or auth information to a Cromwell server.
poll_interval_seconds: Number of seconds between checks for workflow
completion.
verbose: If True, report to stdout when all workflows succeed.
"""
start = datetime.now()
timeout = timedelta(minutes=int(timeout_minutes))
while True:
if datetime.now() - start > timeout:
msg = f'Unfinished workflows after {timeout} minutes.'
raise Exception(msg.format(timeout))
all_succeeded = True
if verbose:
print('--- polling from cromwell ---')
for uuid in workflow_ids:
response = cls.status(uuid, auth)
status = cls._parse_workflow_status(response)
if verbose:
print(f'Workflow {uuid} returned status {status}')
if status in _failed_statuses:
raise exceptions.WorkflowFailedError(
f'Workflow {uuid} returned status {status}'
)
elif status != 'Succeeded':
all_succeeded = False
if all_succeeded:
print('All workflows succeeded!')
return ''
time.sleep(poll_interval_seconds)
@classmethod
def release_hold(
cls: 'CromwellAPI',
uuid: str,
auth: CromwellAuth,
raise_for_status: bool = False,
) -> requests.Response:
"""Request Cromwell to release the hold on a workflow.
It will switch the status of a workflow from 'On Hold' to 'Submitted' so it can be picked for running. For
a workflow that was not submitted with `workflowOnHold = true`, Cromwell will throw an error.
Args:
uuid: A Cromwell workflow UUID, which is the workflow identifier. The workflow is expected to have
`On Hold` status.
auth: The authentication class holding headers or auth
information to a Cromwell server.
raise_for_status: Whether to check and raise for status based on the response.
Raises:
requests.exceptions.HTTPError: This will be raised when raise_for_status is True and Cromwell returns
a response that satisfies 400 <= response.status_code < 600.
Returns:
HTTP response from Cromwell.
"""
response = requests.post(
url=auth.url + cls._release_hold_endpoint.format(uuid=uuid),
auth=auth.auth,
headers=auth.header,
)
if raise_for_status:
cls._check_and_raise_status(response)
return response
@classmethod
def query(
cls: 'CromwellAPI',
query_dict: Dict[str, Union[str, List[str], Dict[str, str], bool]],
auth: CromwellAuth,
raise_for_status: bool = False,
) -> requests.Response:
"""Query for workflows.
TODO: Given that Cromwell-as-a-Service blocks a set of features that are available in Cromwell, e.g. 'labelor',
for security concerns, the first iteration of this API doesn't come up with the advanced query keys of the
Cromwell except a set of necessary ones. However, we need to implement this for completeness and keep an eye
on the compatibility between CaaS and Cromwell.
All of the query keys will be used in an OR manner, except the keys within `labels`, which are defined in
an AND relation. For instance, [{'status': 'Succeeded'}, {'status': 'Failed'}] will give you all of the
workflows that in either `Succeeded` or `Failed` statuses.
Args:
query_dict: A dictionary representing the query key-value paris. The keys should be accepted by the
Cromwell or they will get ignored. The values could be str, list or dict.
auth: The authentication class holding headers or auth
information to a Cromwell server.
raise_for_status: Whether to check and raise for status based on the response.
Raises:
requests.exceptions.HTTPError: This will be raised when raise_for_status is True and Cromwell returns
a response that satisfies 400 <= response.status_code < 600.
Returns:
HTTP response from Cromwell.
"""
if (
'additionalQueryResultFields' in query_dict.keys()
or 'includeSubworkflows' in query_dict.keys()
):
logging.warning(
'Note: additionalQueryResultFields, includeSubworkflows may not scale due to the '
'following issues with Cromwell: https://github.com/broadinstitute/cromwell/issues/3115 '
'and https://github.com/broadinstitute/cromwell/issues/3873'
)
query_params = cls._compose_query_params(query_dict)
response = requests.post(
url=auth.url + cls._query_endpoint,
json=query_params,
auth=auth.auth,
headers=auth.header,
)
if raise_for_status:
cls._check_and_raise_status(response)
return response
@classmethod
def patch_labels(
cls: 'CromwellAPI',
uuid: str,
labels: Dict[str, str],
auth: CromwellAuth,
raise_for_status: bool = False,
) -> requests.Response:
"""Add new labels or patch existing labels for an existing workflow.
Args:
uuid: A Cromwell workflow UUID, which is the workflow identifier.
labels: A dictionary representing the label key-value pairs.
auth: The authentication class holding headers or auth
information to a Cromwell server.
raise_for_status: Whether to check and raise for status based on the response.
Raises:
requests.exceptions.HTTPError: This will be raised when raise_for_status is True and Cromwell returns
a response that satisfies 400 <= response.status_code < 600.
Returns:
HTTP response from Cromwell.
"""
response = requests.patch(
url=auth.url + cls._labels_endpoint.format(uuid=uuid),
json=labels,
auth=auth.auth,
headers=auth.header,
)
if raise_for_status:
cls._check_and_raise_status(response)
return response
@classmethod
def _compose_query_params(
cls: 'CromwellAPI',
query_dict: Dict[str, Union[str, List[str], Dict[str, str], bool]],
) -> List[Dict[str, str]]:
"""Helper function to compose the query params that could be accepted by Cromwell.
This function will parse and compose the query params for Cromwell's /query endpoint from an user's input
query dictionary. It also provides very basic inputs validation so users don't have to wait for the error
response from Cromwell for a long time.
The query keys should be one of the following strings in the `cls._cromwell_query_keys` set, otherwise
they will be ignore by this function.
In general, this method is expecting the input query dictionary follows a basic
structure like below:
```
query_dict = {
'label': {
'cromwell-workflow-id': 'cromwell-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
},
'status': ['Running', 'Succeeded'],
'id': 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',
'additionalQueryResultFields': 'labels',
'submission': '2018-01-01T00:01:01.410150Z',
'start': '2018-01-01T01:01:01.410150Z',
'end': '2018-01-01T02:01:01.410150Z',
'name': ['WorkflowName1', 'WorkflowName2'],
'additionalQueryResultFields': ['labels', 'parentWorkflowId'],
'includeSubworkflows': True
}
```
which will be converted to the following query parameters:
```
query_params = [
{'label': 'cromwell-workflow-id:cromwell-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'},
{'status': 'Running'},
{'status': 'Succeeded'},
{'id': 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'},
{'additionalQueryResultFields': 'labels'},
{'submission': '2018-01-01T00:01:01.410150Z'},
{'start': '2018-01-01T01:01:01.410150Z'},
{'end': '2018-01-01T02:01:01.410150Z'},
{'name': 'WorkflowName1'},
{'name': 'WorkflowName2'},
{'additionalQueryResultFields': 'labels'},
{'additionalQueryResultFields': 'parentWorkflowId'},
{'includeSubworkflows': 'true'}
]
```
Args:
query_dict: A dictionary representing the query key-value paris. The keys should be accepted by the
Cromwell or they will get ignored. The values could be str, list or dict.
Raises:
TypeError: If the input query_dict is not a dictionary.
ValueError: If a list of values are assigned to a query key that belongs to _cromwell_exclusive_query_keys.
Returns:
query_params: A composed list of query objects.
"""
if not isinstance(query_dict, dict):
raise TypeError('A valid dictionary with query keys is required!')
query_params = []
for k, v in query_dict.items():
if k in _cromwell_query_keys:
if k == 'label' and isinstance(v, dict):
query_params.extend(
[
{'label': label_key + ':' + label_value}
for label_key, label_value in v.items()
]
)
elif isinstance(v, list):
if k in _cromwell_exclusive_query_keys:
raise ValueError(
'{} cannot be specified multiple times!'.format(k)
)
query_params.extend(
[
{k: json.dumps(val)}
if not isinstance(val, str)
else {k: val}
for val in set(v)
]
)
else:
query_params.append(
{k: json.dumps(v)} if not isinstance(v, str) else {k: v}
)
else:
logger.info(
'{} is not an allowed query key in Cromwell, will be ignored in this query.'.format(
k
)
)
return query_params
@staticmethod
def _parse_workflow_status(response: requests.Response) -> str:
"""Helper function to parse a status response.
Args:
response: A status response object from Cromwell.
Raises:
WorkflowUnknownError: This will be raised when Cromwell returns a status code != 200.
Returns:
String representing status response.
"""
if response.status_code != 200:
raise exceptions.WorkflowUnknownError(
'Status could not be determined, endpoint returned {0}'.format(
response.status_code
)
)
else:
return response.json()['status']
@staticmethod
def _check_and_raise_status(response: requests.Response) -> None:
"""Helper function to check the status of a response and raise a friendly message if there are errors.
This functions is using the `response.ok` which wraps the `raise_for_status()`, by doing this, we can
produce the actual error messages from the Cromwell, instead of shadowing them with `raise_for_status()`.
Args:
response: A status response object from Cromwell.
Raises:
requests.exceptions.HTTPError: This will be raised when Cromwell returns a response that satisfies
400 <= response.status_code < 600.
"""
if not response.ok:
raise requests.exceptions.HTTPError(
'Error Code {0}: {1}'.format(response.status_code, response.text)
)
| 39.076175 | 119 | 0.610411 |
d85aa32c5da616f76675b356404a20a68d559f68 | 1,416 | py | Python | xlsxwriter/test/comparison/test_chart_display_units12.py | Aeon1/XlsxWriter | 6871b6c3fe6c294632054ea91f23d9e27068bcc1 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2019-07-25T06:08:09.000Z | 2019-11-01T02:33:56.000Z | xlsxwriter/test/comparison/test_chart_display_units12.py | Aeon1/XlsxWriter | 6871b6c3fe6c294632054ea91f23d9e27068bcc1 | [
"BSD-2-Clause-FreeBSD"
] | 13 | 2019-07-14T00:29:05.000Z | 2019-11-26T06:16:46.000Z | xlsxwriter/test/comparison/test_chart_display_units12.py | Aeon1/XlsxWriter | 6871b6c3fe6c294632054ea91f23d9e27068bcc1 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Reference workbook (created by Excel) that the generated file
        # must match element-for-element.
        self.set_filename('chart_display_units12.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'scatter'})
        # Fixed axis ids so the generated XML matches the reference file.
        chart.axis_ids = [93550464, 93548544]
        data = [
            [10000000, 20000000, 30000000, 20000000, 10000000],
        ]
        worksheet.write_column(0, 0, data[0])
        worksheet.write_column(0, 1, data[0])
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$B$1:$B$5'
        })
        # Display units on both axes, with the unit labels hidden
        # (the behaviour under test in this comparison case).
        chart.set_y_axis({'display_units': 'hundreds',
                          'display_units_visible': False})
        chart.set_x_axis({'display_units': 'thousands',
                          'display_units_visible': False})
        worksheet.insert_chart('E9', chart)
        workbook.close()
        self.assertExcelEqual()
| 26.222222 | 79 | 0.573446 |
bdbff41a1d33d9265c3ea1ee9f74e373d15d432a | 2,388 | py | Python | pycipher/util.py | onlykood/pycipher | 8f1d7cf3cba4e12171e27d9ce723ad890194de19 | [
"MIT"
] | 196 | 2015-01-16T19:09:19.000Z | 2022-03-13T16:19:21.000Z | pycipher/util.py | rafaelmessias/pycipher | 787eb947a173138869ddd388b5331559e5cd3a5a | [
"MIT"
] | 9 | 2015-10-09T18:07:32.000Z | 2021-12-22T12:04:00.000Z | pycipher/util.py | rafaelmessias/pycipher | 787eb947a173138869ddd388b5331559e5cd3a5a | [
"MIT"
] | 76 | 2015-02-08T23:17:43.000Z | 2021-12-27T04:15:30.000Z | '''
some statistics routines for cryptanalysis
'''
import math
import re
def ic(ctext):
    '''Take ciphertext and calculate its index of coincidence.

    Non-alphabetic characters are stripped (by ngram_count) before
    counting; the denominator is derived from the number of letters
    actually counted.

    Args:
        ctext: the ciphertext string.

    Returns:
        The index of coincidence as a float; 0.0 when fewer than two
        letters are present (previously a ZeroDivisionError).
    '''
    counts = ngram_count(ctext, N=1)
    # BUG FIX: the old code divided by len(ctext), which still included
    # the punctuation/whitespace that ngram_count had already stripped,
    # skewing the statistic for any non-clean input.
    total = sum(counts.values())
    if total < 2:
        return 0.0
    icval = sum(n * (n - 1) for n in counts.values())
    return icval / (total * (total - 1))
def ngram_count(text, N=1, keep_punct=False):
    ''' Count the occurrences of every N-gram in a text.

    With N=1 the result maps each letter to how many times it occurred,
    with N=2 each bigram (pair of letters), and so on. Counts are kept
    as floats so later division behaves the same on Python 2 and 3.

    Args:
        text: the input string.
        N: the length of the n-grams to count.
        keep_punct: if False (the default), uppercase the text and drop
            every character outside A-Z before counting.

    Returns:
        dict mapping each observed N-gram to its (float) count.
    '''
    if not keep_punct:
        text = re.sub('[^A-Z]', '', text.upper())
    counts = {}
    for start in range(len(text) - N + 1):
        gram = text[start:start + N]
        counts[gram] = counts.get(gram, 0.0) + 1
    return counts
def ngram_freq(text, N=1, log=False, floor=0.01):
    ''' Return the relative frequency of every N-gram found in a text.

    Only n-grams that actually occur in the text get an entry. The
    probability to use for unseen n-grams is stored under the special
    key 'floor' and equals floor divided by the number of n-gram
    positions (its log10 when log=True).

    Args:
        text: the input string (punctuation is stripped by ngram_count).
        N: the length of the n-grams.
        log: if True, return log10 probabilities instead of plain ones.
        floor: numerator for the 'floor' probability of unseen n-grams.

    Returns:
        dict mapping n-grams (plus 'floor') to (log) probabilities.
    '''
    freq = ngram_count(text, N)
    # NOTE: matches the original behaviour — the position count uses the
    # raw text length, even though ngram_count strips punctuation.
    positions = 1.0 * (len(text) - N + 1)
    for gram in freq.keys():
        prob = freq[gram] / positions
        freq[gram] = math.log10(prob) if log else prob
    floor_prob = floor / positions
    freq['floor'] = math.log10(floor_prob) if log else floor_prob
    return freq
def restore_punctuation(original, modified):
    ''' Re-insert punctuation that was stripped from a string.

    Walks through *original*, copying every non-alphabetic character
    straight through while filling alphabetic positions from *modified*
    in order.

    Args:
        original: the original string, punctuation included.
        modified: the processed text, alphabetic characters only.

    Returns:
        *modified* with the punctuation of *original* restored.

    Raises:
        IndexError: if the strings do not contain the same number of
            alphabetic characters.
    '''
    pieces = []
    position = 0
    try:
        for ch in original:
            if ch.isalpha():
                pieces.append(modified[position])
                position += 1
            else:
                pieces.append(ch)
    except IndexError:
        print('restore_punctuation: strings must have same number of alphabetic chars')
        raise
    return ''.join(pieces)
def keyword_to_key(word, alphabet='ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    ''' Expand a keyword into a full substitution key.

    The keyword (uppercased, duplicate letters dropped) is followed by
    the remaining letters of the alphabet in order, e.g.
    MONARCHY -> MONARCHYBDEFGIJKLPQSTUVWXZ.

    Args:
        word: the keyword to start the key with.
        alphabet: the full alphabet supplying the remaining letters.

    Returns:
        A permutation of the alphabet beginning with the keyword letters.
    '''
    key = ''
    for ch in (word + alphabet).upper():
        if ch not in key:
            key += ch
    return key
| 33.633803 | 99 | 0.617253 |
a2a2a79aa05439ecfc1beee53fa27da2eb4a1669 | 1,121 | py | Python | third_party/dashboard/tag_solution.py | nya3jp/icfpc2021 | 4ed656aa0ecfc697e48430cbb0dca2c6adfc46c9 | [
"Apache-2.0"
] | null | null | null | third_party/dashboard/tag_solution.py | nya3jp/icfpc2021 | 4ed656aa0ecfc697e48430cbb0dca2c6adfc46c9 | [
"Apache-2.0"
] | null | null | null | third_party/dashboard/tag_solution.py | nya3jp/icfpc2021 | 4ed656aa0ecfc697e48430cbb0dca2c6adfc46c9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# Copyright 2021 Team Special Weekend
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
# Attach one or more tags to a dashboard solution via its REST API.
parser = argparse.ArgumentParser()
# SOLUTION_ID is the numeric id of the solution to tag.
parser.add_argument('solution_id', metavar='SOLUTION_ID', type=int, help='solution ID')
# --tag may be repeated; every occurrence is collected into args.tags.
parser.add_argument('--tag', dest='tags', metavar='TAG', action='append', default=[], help='tag')
args = parser.parse_args()
# One POST per tag. NOTE(review): shells out to curl rather than using an
# HTTP library, and the subprocess return code is not checked, so failed
# requests pass silently.
for tag in args.tags:
    subprocess.run([
        'curl',
        '-X',
        'POST',
        'https://spweek.badalloc.com/api/solutions/{}/tags?tag={}'.format(args.solution_id, tag),
    ])
| 33.969697 | 97 | 0.717217 |
e2f8f924e9a9254d3d55a4292db5f4fbe4ae42b2 | 3,057 | py | Python | demo.py | hassiweb/mitemp | 50eaed19d5fda9d6d642a4e009d54291299ea3c8 | [
"MIT"
] | null | null | null | demo.py | hassiweb/mitemp | 50eaed19d5fda9d6d642a4e009d54291299ea3c8 | [
"MIT"
] | null | null | null | demo.py | hassiweb/mitemp | 50eaed19d5fda9d6d642a4e009d54291299ea3c8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Demo file showing how to use the mitemp library."""
import argparse
import re
import logging
import sys
from btlewrap import available_backends, BluepyBackend, GatttoolBackend, PygattBackend
from mitemp_bt.mitemp_bt_poller import MiTempBtPoller, \
MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY
def valid_mitemp_mac(mac, pat=re.compile(r"(4C:65:A8|58:2D:34):[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}\Z")):
    """Validate (and return) a Mi Temp sensor MAC address.

    Intended for use as an argparse ``type`` callback.

    Args:
        mac: candidate MAC address string (case-insensitive).
        pat: compiled pattern the uppercased MAC must match completely.

    Returns:
        The MAC address unchanged.

    Raises:
        argparse.ArgumentTypeError: if the MAC is not in the expected format.
    """
    # BUG FIX: re.match only anchors at the start of the string, so MACs
    # with trailing garbage (e.g. "4C:65:A8:12:34:56ZZ") used to be
    # accepted; the \Z in the pattern now anchors the end as well.
    if not pat.match(mac.upper()):
        raise argparse.ArgumentTypeError('The MAC address "{}" seems to be in the wrong format'.format(mac))
    return mac
def poll(args):
    """Poll data from the sensor."""
    # Resolve the Bluetooth backend selected with --backend and create a
    # poller for the sensor at args.mac.
    backend = _get_backend(args)
    poller = MiTempBtPoller(args.mac, backend)
    print("Getting data from Mi Temperature and Humidity Sensor")
    # NOTE(review): each accessor below presumably triggers (or reuses a
    # cached) BLE read on the device — confirm against mitemp_bt.
    print("FW: {}".format(poller.firmware_version()))
    print("Name: {}".format(poller.name()))
    print("Battery: {}".format(poller.parameter_value(MI_BATTERY)))
    print("Temperature: {}".format(poller.parameter_value(MI_TEMPERATURE)))
    print("Humidity: {}".format(poller.parameter_value(MI_HUMIDITY)))
# def scan(args):
# """Scan for sensors."""
# backend = _get_backend(args)
# print('Scanning for 10 seconds...')
# devices = mitemp_scanner.scan(backend, 10)
# devices = []
# print('Found {} devices:'.format(len(devices)))
# for device in devices:
# print(' {}'.format(device))
def _get_backend(args):
    """Extract the backend class from the command line arguments."""
    backends = {
        'gatttool': GatttoolBackend,
        'bluepy': BluepyBackend,
        'pygatt': PygattBackend,
    }
    if args.backend not in backends:
        raise Exception('unknown backend: {}'.format(args.backend))
    return backends[args.backend]
def list_backends(_):
    """Print the name of every available btlewrap backend, one per line."""
    for backend_class in available_backends():
        print(backend_class.__name__)
def main():
    """Main function.

    Mostly parsing the command line arguments.
    """
    parser = argparse.ArgumentParser()
    # Global options shared by every sub-command.
    parser.add_argument('--backend', choices=['gatttool', 'bluepy', 'pygatt'], default='gatttool')
    parser.add_argument('-v', '--verbose', action='store_const', const=True)
    subparsers = parser.add_subparsers(help='sub-command help', )
    # 'poll': read data from one sensor, identified by its MAC address.
    parser_poll = subparsers.add_parser('poll', help='poll data from a sensor')
    parser_poll.add_argument('mac', type=valid_mitemp_mac)
    parser_poll.set_defaults(func=poll)
    # parser_scan = subparsers.add_parser('scan', help='scan for devices')
    # parser_scan.set_defaults(func=scan)
    # 'backends': list the btlewrap backends available in this install.
    parser_scan = subparsers.add_parser('backends', help='list the available backends')
    parser_scan.set_defaults(func=list_backends)
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    # No sub-command given: show usage instead of crashing on args.func.
    if not hasattr(args, "func"):
        parser.print_help()
        sys.exit(0)
    args.func(args)
# Script entry point.
if __name__ == '__main__':
    main()
| 31.515464 | 108 | 0.67648 |
fe6b01ced4e1704dc945733c200b98ff77e2dac3 | 2,095 | py | Python | homeassistant/components/tapsaff/binary_sensor.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 3 | 2020-01-21T18:09:09.000Z | 2022-01-17T08:06:03.000Z | homeassistant/components/tapsaff/binary_sensor.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 39 | 2016-12-16T12:40:34.000Z | 2017-02-13T17:53:42.000Z | homeassistant/components/tapsaff/binary_sensor.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 6 | 2020-04-10T06:21:11.000Z | 2021-07-01T08:53:38.000Z | """Support for Taps Affs."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorDevice
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Configuration key for the pub/location to query.
CONF_LOCATION = "location"

DEFAULT_NAME = "Taps Aff"

# How often Home Assistant polls this platform for fresh data.
SCAN_INTERVAL = timedelta(minutes=30)

# Platform schema: a required location plus an optional friendly name.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_LOCATION): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Taps Aff binary sensor."""
    sensor_name = config.get(CONF_NAME)
    location = config.get(CONF_LOCATION)

    # One shared data object feeds the single sensor entity; the trailing
    # True asks Home Assistant to update the entity before adding it.
    data = TapsAffData(location)
    add_entities([TapsAffSensor(data, sensor_name)], True)
class TapsAffSensor(BinarySensorDevice):
    """Binary sensor reporting whether it is 'taps aff' weather."""

    def __init__(self, taps_aff_data, name):
        """Store the shared data object and the configured display name."""
        self.data = taps_aff_data
        self._name = name

    @property
    def name(self):
        """Return the name of the sensor."""
        return f"{self._name}"

    @property
    def is_on(self):
        """Return True while the service reports taps aff."""
        return self.data.is_taps_aff

    def update(self):
        """Delegate to the shared data object for a refresh."""
        self.data.update()
class TapsAffData:
    """Class for handling the data retrieval for pins."""

    def __init__(self, location):
        """Initialize the data object."""
        # Imported lazily so the dependency is only required when the
        # platform is actually configured.
        from tapsaff import TapsAff

        # None until the first successful update() completes.
        self._is_taps_aff = None
        self.taps_aff = TapsAff(location)

    @property
    def is_taps_aff(self):
        """Return true if taps aff."""
        return self._is_taps_aff

    def update(self):
        """Get the latest data from the Taps Aff API and updates the states."""
        try:
            self._is_taps_aff = self.taps_aff.is_taps_aff
        except RuntimeError:
            # Keep the previous state on failure; just log the problem.
            _LOGGER.error("Update failed. Check configured location")
| 25.864198 | 86 | 0.67494 |
442e0a02745c4a2e7cb61804dbf5c38f4e3854cc | 5,189 | py | Python | tests/unit/test_prestoclient.py | leniartek/trino-admin | 05104a0b35bbc4aeca9469b2fc63a21c814a7855 | [
"Apache-2.0"
] | 19 | 2019-06-12T13:33:18.000Z | 2020-12-18T09:09:22.000Z | tests/unit/test_prestoclient.py | leniartek/trino-admin | 05104a0b35bbc4aeca9469b2fc63a21c814a7855 | [
"Apache-2.0"
] | 19 | 2019-05-16T13:09:25.000Z | 2020-12-04T18:01:39.000Z | tests/unit/test_prestoclient.py | leniartek/trino-admin | 05104a0b35bbc4aeca9469b2fc63a21c814a7855 | [
"Apache-2.0"
] | 15 | 2019-03-07T16:37:06.000Z | 2020-11-12T12:07:46.000Z | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
from httplib import HTTPException, HTTPConnection
from fabric.operations import _AttributeString
from mock import patch, PropertyMock
from prestoadmin.prestoclient import URL_TIMEOUT_MS, PrestoClient
from prestoadmin.util.exception import InvalidArgumentError
from tests.base_test_case import BaseTestCase
from tests.unit.base_unit_case import PRESTO_CONFIG
@patch('prestoadmin.util.presto_config.PrestoConfig.coordinator_config',
       return_value=PRESTO_CONFIG)
class TestPrestoClient(BaseTestCase):
    """Unit tests for PrestoClient.run_sql argument validation and HTTP handling.

    The class-level patch stubs out coordinator config loading, so every
    test method receives `mock_presto_config` as its final mock argument.
    """

    def test_no_sql(self, mock_presto_config):
        # An empty SQL string must be rejected before any connection attempt.
        client = PrestoClient('any_host', 'any_user')
        self.assertRaisesRegexp(InvalidArgumentError,
                                "SQL query missing",
                                client.run_sql, "", )

    def test_no_server(self, mock_presto_config):
        # An empty host must be rejected before any connection attempt.
        client = PrestoClient("", 'any_user')
        self.assertRaisesRegexp(InvalidArgumentError,
                                "Server IP missing",
                                client.run_sql, "any_sql")

    def test_no_user(self, mock_presto_config):
        # An empty username must be rejected before any connection attempt.
        client = PrestoClient('any_host', "")
        self.assertRaisesRegexp(InvalidArgumentError,
                                "Username missing",
                                client.run_sql, "any_sql")

    @patch.object(PrestoClient, '_create_auth_headers', return_value={'X-Presto-Internal-Bearer': 'any_bearer'})
    @patch('prestoadmin.prestoclient.HTTPConnection')
    def test_default_request_called(self, mock_conn, mock_auth_header, mock_presto_config):
        # The POST to /v1/statement must carry the default Presto headers
        # plus the bearer header produced by _create_auth_headers.
        client = PrestoClient('any_host', 'any_user')
        headers = {"X-Presto-Catalog": "hive", "X-Presto-Schema": "default",
                   "X-Presto-Source": "presto-admin",
                   "X-Presto-Internal-Bearer": "any_bearer"}

        client.run_sql("any_sql")
        mock_conn.assert_called_with('any_host', 8080, False, URL_TIMEOUT_MS)
        mock_conn().request.assert_called_with("POST", "/v1/statement",
                                               "any_sql", headers)
        self.assertTrue(mock_conn().getresponse.called)

    @patch('prestoadmin.prestoclient.HTTPConnection')
    def test_connection_failed(self, mock_conn, mock_presto_config):
        # A failed round trip should close the connection and make
        # run_sql return a falsy result.
        client = PrestoClient('any_host', 'any_user')
        client.run_sql("any_sql")

        self.assertTrue(mock_conn().close.called)
        self.assertFalse(client.run_sql("any_sql"))

    @patch('prestoadmin.prestoclient.HTTPConnection')
    def test_http_call_failed(self, mock_conn, mock_presto_config):
        # Both HTTP-level and socket-level errors are swallowed by
        # run_sql, which signals failure by returning False.
        client = PrestoClient('any_host', 'any_user')
        mock_conn.side_effect = HTTPException("Error")
        self.assertFalse(client.run_sql("any_sql"))

        mock_conn.side_effect = socket.error("Error")
        self.assertFalse(client.run_sql("any_sql"))

    @patch.object(HTTPConnection, 'request')
    @patch.object(HTTPConnection, 'getresponse')
    def test_http_answer_valid(self, mock_response, mock_request, mock_presto_config):
        # A 200 response carrying an empty JSON object yields an empty
        # result set.
        client = PrestoClient('any_host', 'any_user')
        mock_response.return_value.read.return_value = '{}'
        type(mock_response.return_value).status = \
            PropertyMock(return_value=200)
        self.assertEquals(client.run_sql('any_sql'), [])

    @patch.object(HTTPConnection, 'request')
    @patch.object(HTTPConnection, 'getresponse')
    def test_http_answer_not_json(self, mock_response,
                                  mock_request, mock_presto_config):
        # Malformed JSON in a 200 response surfaces as a ValueError.
        client = PrestoClient('any_host', 'any_user')
        mock_response.return_value.read.return_value = 'NOT JSON!'
        type(mock_response.return_value).status =\
            PropertyMock(return_value=200)
        self.assertRaisesRegexp(ValueError, 'No JSON object could be decoded',
                                client.run_sql, 'any_sql')

    @patch('prestoadmin.prestoclient.HTTPConnection')
    @patch('prestoadmin.util.remote_config_util.sudo')
    def testrun_sql_get_port(self, sudo_mock, conn_mock, mock_presto_config):
        # run_sql should read the coordinator's http port from the remote
        # config and reset its per-query state afterwards.
        client = PrestoClient('any_host', 'any_user')
        client.rows = ['hello']
        client.next_uri = 'hello'
        client.response_from_server = {'hello': 'hello'}
        sudo_mock.return_value = _AttributeString('http-server.http.port=8080')
        sudo_mock.return_value.failed = False
        sudo_mock.return_value.return_code = 0
        client.run_sql('select * from nation')
        self.assertEqual(client.port, 8080)
        self.assertEqual(client.rows, [])
        self.assertEqual(client.next_uri, '')
        self.assertEqual(client.response_from_server, {})
02d67f60f3f43e9d0dce58e18bd362265a3be382 | 6,389 | py | Python | hanse_ros/hanse_pipefollowing/src/hanse_pipefollowing/cfg/PipeFollowingConfig.py | iti-luebeck/HANSE2012 | fd2348823a6a51baf87cd493529f085fb22d65a7 | [
"BSD-3-Clause"
] | null | null | null | hanse_ros/hanse_pipefollowing/src/hanse_pipefollowing/cfg/PipeFollowingConfig.py | iti-luebeck/HANSE2012 | fd2348823a6a51baf87cd493529f085fb22d65a7 | [
"BSD-3-Clause"
] | null | null | null | hanse_ros/hanse_pipefollowing/src/hanse_pipefollowing/cfg/PipeFollowingConfig.py | iti-luebeck/HANSE2012 | fd2348823a6a51baf87cd493529f085fb22d65a7 | [
"BSD-3-Clause"
] | null | null | null | ## *********************************************************
##
## File autogenerated for the hanse_pipefollowing package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
##**********************************************************
## Software License Agreement (BSD License)
##
## Copyright (c) 2008, Willow Garage, Inc.
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions
## are met:
##
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above
## copyright notice, this list of conditions and the following
## disclaimer in the documentation and/or other materials provided
## with the distribution.
## * Neither the name of the Willow Garage nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
## FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
## COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
## CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
## LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
## ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
##**********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 233, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/fuerte/stacks/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 259, 'description': '', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/fuerte/stacks/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator.py', 'name': 'minSize', 'edit_method': '', 'default': 0.05, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 259, 'description': '', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/fuerte/stacks/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator.py', 'name': 'maxSize', 'edit_method': '', 'default': 0.5, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 259, 'description': '', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/fuerte/stacks/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator.py', 'name': 'fwSpeed', 'edit_method': '', 'default': 0.5, 'level': 0, 'min': 0.1, 'type': 'double'}, {'srcline': 259, 'description': 'in radians', 'max': 0.7853981633974483, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/fuerte/stacks/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator.py', 'name': 'deltaAngle', 'edit_method': '', 'default': 0.192, 'level': 0, 'min': 0.01, 'type': 'double'}, {'srcline': 259, 'description': '', 'max': 600.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/fuerte/stacks/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator.py', 'name': 'deltaDist', 'edit_method': '', 'default': 100.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 259, 'description': '', 'max': 5.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': 
'/opt/ros/fuerte/stacks/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator.py', 'name': 'kpAngle', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': -5.0, 'type': 'double'}, {'srcline': 259, 'description': '', 'max': 5.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/fuerte/stacks/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator.py', 'name': 'kpDist', 'edit_method': '', 'default': 0.1, 'level': 0, 'min': -5.0, 'type': 'double'}, {'srcline': 259, 'description': '', 'max': 640.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/fuerte/stacks/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator.py', 'name': 'robCenterX', 'edit_method': '', 'default': 320.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 259, 'description': '', 'max': 480.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/fuerte/stacks/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator.py', 'name': 'robCenterY', 'edit_method': '', 'default': 240.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 259, 'description': '', 'max': 600.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/fuerte/stacks/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator.py', 'name': 'maxDistance', 'edit_method': '', 'default': 320.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 259, 'description': '', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/fuerte/stacks/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator.py', 'name': 'mirror', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}], 'type': '', 'id': 0}
# Per-parameter lookup tables, filled in from config_description below.
# NOTE(review): these names intentionally shadow the min/max/type builtins;
# the file is autogenerated by dynamic_reconfigure and the names are
# presumably part of the generated module's interface -- do not rename.
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0
#def extract_params(config):
#    params = []
#    params.extend(config['parameters'])
#    for group in config['groups']:
#        params.extend(extract_params(group))
#    return params
# Flatten every parameter (including those nested in groups) into the
# name-keyed dictionaries above.
for param in extract_params(config_description):
    min[param['name']] = param['min']
    max[param['name']] = param['max']
    defaults[param['name']] = param['default']
    level[param['name']] = param['level']
    type[param['name']] = param['type']
    # Combined bitmask of every parameter's reconfigure level.
    all_level = all_level | param['level']
| 89.985915 | 3,716 | 0.659728 |
ebdea2d94a228905ec9017496cf4c250fa11de4b | 28,915 | py | Python | eventlet/support/greendns.py | miguelgrinberg/eventlet | f1b63abd6db186c978077499f9670600da599d1a | [
"MIT"
] | 1 | 2018-10-13T15:57:29.000Z | 2018-10-13T15:57:29.000Z | eventlet/support/greendns.py | miguelgrinberg/eventlet | f1b63abd6db186c978077499f9670600da599d1a | [
"MIT"
] | null | null | null | eventlet/support/greendns.py | miguelgrinberg/eventlet | f1b63abd6db186c978077499f9670600da599d1a | [
"MIT"
] | 1 | 2019-12-21T10:21:53.000Z | 2019-12-21T10:21:53.000Z | '''greendns - non-blocking DNS support for Eventlet
'''
# Portions of this code taken from the gogreen project:
# http://github.com/slideinc/gogreen
#
# Copyright (c) 2005-2010 Slide, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import struct
import sys
from eventlet import patcher
from eventlet.green import _socket_nodns
from eventlet.green import os
from eventlet.green import time
from eventlet.green import select
from eventlet.support import six
def import_patched(module_name):
    """Import *module_name* with its blocking dependencies greened.

    Import cycle note: it's crucial to use _socket_nodns here because
    regular eventlet.green.socket imports *this* module, and importing
    it back would create a cycle (socket -> greendns -> socket).  The
    restricted socket module breaks that cycle.
    """
    green_modules = dict(
        select=select,
        time=time,
        os=os,
        socket=_socket_nodns,
    )
    return patcher.import_patched(module_name, **green_modules)
# Make the bundled dnspython copy importable, then import it (and every
# sub-package that keeps its own module references) through import_patched
# so none of them holds on to the real blocking socket/select/time/os.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
dns = import_patched('dns')
for pkg in dns.__all__:
    setattr(dns, pkg, import_patched('dns.' + pkg))
for pkg in dns.rdtypes.__all__:
    setattr(dns.rdtypes, pkg, import_patched('dns.rdtypes.' + pkg))
for pkg in dns.rdtypes.IN.__all__:
    setattr(dns.rdtypes.IN, pkg, import_patched('dns.rdtypes.IN.' + pkg))
for pkg in dns.rdtypes.ANY.__all__:
    setattr(dns.rdtypes.ANY, pkg, import_patched('dns.rdtypes.ANY.' + pkg))
# Helper is only needed at import time; restore sys.path as well.
del import_patched
sys.path.pop(0)
# Module-local alias: the DNS-free green socket module (see the import
# cycle note in import_patched above).
socket = _socket_nodns
DNS_QUERY_TIMEOUT = 10.0   # seconds before a single DNS query times out
HOSTS_TTL = 10.0           # seconds between re-reads of the hosts file
# Pre-built gaierror instances raised by the lookup helpers below.
EAI_EAGAIN_ERROR = socket.gaierror(socket.EAI_AGAIN, 'Lookup timed out')
EAI_NONAME_ERROR = socket.gaierror(socket.EAI_NONAME, 'Name or service not known')
# EAI_NODATA was removed from RFC3493, it's now replaced with EAI_NONAME
# socket.EAI_NODATA is not defined on FreeBSD, probably on some other platforms too.
# https://lists.freebsd.org/pipermail/freebsd-ports/2003-October/005757.html
EAI_NODATA_ERROR = EAI_NONAME_ERROR
# Opt-in escape hatch: restore the legacy EAI_NODATA error where available.
if (os.environ.get('EVENTLET_DEPRECATED_EAI_NODATA', '').lower() in ('1', 'y', 'yes')
        and hasattr(socket, 'EAI_NODATA')):
    EAI_NODATA_ERROR = socket.gaierror(socket.EAI_NODATA, 'No address associated with hostname')
def is_ipv4_addr(host):
    """Return True if host is a valid IPv4 address"""
    if not isinstance(host, six.string_types):
        return False
    try:
        dns.ipv4.inet_aton(host)
        return True
    except dns.exception.SyntaxError:
        return False
def is_ipv6_addr(host):
    """Return True if host is a valid IPv6 address"""
    if not isinstance(host, six.string_types):
        return False
    # Strip any scope/zone id suffix ("%eth0") before validating.
    addr = host.split('%', 1)[0]
    try:
        dns.ipv6.inet_aton(addr)
        return True
    except dns.exception.SyntaxError:
        return False
def is_ip_addr(host):
    """Return True when *host* parses as either an IPv4 or IPv6 address."""
    return is_ipv6_addr(host) or is_ipv4_addr(host)
class HostsAnswer(dns.resolver.Answer):
    """Answer class for HostsResolver object"""

    def __init__(self, qname, rdtype, rdclass, rrset, raise_on_no_answer=True):
        """Create a new answer

        :qname: A dns.name.Name instance of the query name
        :rdtype: The rdatatype of the query
        :rdclass: The rdataclass of the query
        :rrset: The dns.rrset.RRset with the response, must have ttl attribute
        :raise_on_no_answer: Whether to raise dns.resolver.NoAnswer if no
           answer.
        """
        self.response = None
        self.qname = qname
        self.rdtype = rdtype
        self.rdclass = rdclass
        self.canonical_name = qname
        if not rrset and raise_on_no_answer:
            raise dns.resolver.NoAnswer()
        self.rrset = rrset
        # Bug fix: previously this parsed as
        #     (time.time() + rrset.ttl) if hasattr(rrset, 'ttl') else 0
        # so an rrset without a ttl attribute got expiration 0 (the epoch)
        # instead of the intended "now + 0".  Parenthesize the ttl term.
        self.expiration = time.time() + (
            rrset.ttl if hasattr(rrset, 'ttl') else 0)
class HostsResolver(object):
    """Class to parse the hosts file

    Attributes
    ----------
    :fname: The filename of the hosts file in use.
    :interval: The time between checking for hosts file modification
    """

    # Matches one meaningful line of the hosts file, excluding comments
    # and surrounding whitespace.
    LINES_RE = re.compile(r"""
        \s*  # Leading space
        ([^\r\n#]+?)  # The actual match, non-greedy so as not to include trailing space
        \s*  # Trailing space
        (?:[#][^\r\n]+)?  # Comments
        (?:$|[\r\n]+)  # EOF or newline
    """, re.VERBOSE)

    def __init__(self, fname=None, interval=HOSTS_TTL):
        self._v4 = {}  # name -> ipv4
        self._v6 = {}  # name -> ipv6
        self._aliases = {}  # name -> canonical_name
        self.interval = interval
        self.fname = fname
        if fname is None:
            # Pick the platform's default hosts file location.
            if os.name == 'posix':
                self.fname = '/etc/hosts'
            elif os.name == 'nt':
                self.fname = os.path.expandvars(
                    r'%SystemRoot%\system32\drivers\etc\hosts')
        self._last_load = 0
        if self.fname:
            self._load()

    def _readlines(self):
        """Read the contents of the hosts file

        Return list of lines, comment lines and empty lines are
        excluded.

        Note that this performs disk I/O so can be blocking.
        """
        try:
            with open(self.fname, 'rb') as fp:
                fdata = fp.read()
        except (IOError, OSError):
            # Missing/unreadable hosts file is treated as empty.
            return []
        udata = fdata.decode(errors='ignore')
        return self.LINES_RE.findall(udata)

    def _load(self):
        """Load hosts file

        This will unconditionally (re)load the data from the hosts
        file.
        """
        lines = self._readlines()
        self._v4.clear()
        self._v6.clear()
        self._aliases.clear()
        for line in lines:
            parts = line.split()
            if len(parts) < 2:
                continue
            ip = parts.pop(0)
            if is_ipv4_addr(ip):
                ipmap = self._v4
            elif is_ipv6_addr(ip):
                if ip.startswith('fe80'):
                    # Do not use link-local addresses, OSX stores these here
                    continue
                ipmap = self._v6
            else:
                continue
            # The first hostname on a line is the canonical one; any
            # remaining names are aliases for the same address.
            cname = parts.pop(0)
            ipmap[cname] = ip
            for alias in parts:
                ipmap[alias] = ip
                self._aliases[alias] = cname
        self._last_load = time.time()

    def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
              tcp=False, source=None, raise_on_no_answer=True):
        """Query the hosts file

        The known rdtypes are dns.rdatatype.A, dns.rdatatype.AAAA and
        dns.rdatatype.CNAME.

        The ``rdclass`` parameter must be dns.rdataclass.IN while the
        ``tcp`` and ``source`` parameters are ignored.

        Return a HostAnswer instance or raise a dns.resolver.NoAnswer
        exception.
        """
        now = time.time()
        # Re-read the hosts file when the cached copy is older than interval.
        if self._last_load + self.interval < now:
            self._load()
        rdclass = dns.rdataclass.IN
        if isinstance(qname, six.string_types):
            name = qname
            qname = dns.name.from_text(qname)
        else:
            name = str(qname)
        rrset = dns.rrset.RRset(qname, rdclass, rdtype)
        # Answers stay valid until the next scheduled hosts-file reload.
        rrset.ttl = self._last_load + self.interval - now
        if rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.A:
            addr = self._v4.get(name)
            if not addr and qname.is_absolute():
                # Retry without the trailing dot of an absolute name.
                addr = self._v4.get(name[:-1])
            if addr:
                rrset.add(dns.rdtypes.IN.A.A(rdclass, rdtype, addr))
        elif rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.AAAA:
            addr = self._v6.get(name)
            if not addr and qname.is_absolute():
                addr = self._v6.get(name[:-1])
            if addr:
                rrset.add(dns.rdtypes.IN.AAAA.AAAA(rdclass, rdtype, addr))
        elif rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.CNAME:
            cname = self._aliases.get(name)
            if not cname and qname.is_absolute():
                cname = self._aliases.get(name[:-1])
            if cname:
                rrset.add(dns.rdtypes.ANY.CNAME.CNAME(
                    rdclass, rdtype, dns.name.from_text(cname)))
        return HostsAnswer(qname, rdtype, rdclass, rrset, raise_on_no_answer)

    def getaliases(self, hostname):
        """Return a list of all the aliases of a given cname"""
        # Due to the way store aliases this is a bit inefficient, this
        # clearly was an afterthought.  But this is only used by
        # gethostbyname_ex so it's probably fine.
        aliases = []
        if hostname in self._aliases:
            cannon = self._aliases[hostname]
        else:
            cannon = hostname
        aliases.append(cannon)
        # Collect every alias pointing at the same canonical name, then
        # drop the queried name itself from the list.
        for alias, cname in six.iteritems(self._aliases):
            if cannon == cname:
                aliases.append(alias)
        aliases.remove(hostname)
        return aliases
class ResolverProxy(object):
    """Resolver class which can also use /etc/hosts

    Initialise with a HostsResolver instance in order for it to also
    use the hosts file.
    """

    def __init__(self, hosts_resolver=None, filename='/etc/resolv.conf'):
        """Initialise the resolver proxy

        :param hosts_resolver: An instance of HostsResolver to use.

        :param filename: The filename containing the resolver
           configuration.  The default value is correct for both UNIX
           and Windows, on Windows it will result in the configuration
           being read from the Windows registry.
        """
        self._hosts = hosts_resolver
        self._filename = filename
        self.clear()

    def clear(self):
        # (Re)build the dnspython resolver from the stored configuration
        # file and give it a fresh LRU answer cache.
        self._resolver = dns.resolver.Resolver(filename=self._filename)
        self._resolver.cache = dns.resolver.LRUCache()

    def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
              tcp=False, source=None, raise_on_no_answer=True,
              _hosts_rdtypes=(dns.rdatatype.A, dns.rdatatype.AAAA)):
        """Query the resolver, using /etc/hosts if enabled.

        Behavior:
        1. if hosts is enabled and contains answer, return it now
        2. query nameservers for qname
        3. if qname did not contain dots, pretend it was top-level domain,
           query "foobar." and append to previous result
        """
        # result[0]: merged Answer so far (or None), result[1]: first
        # exception seen (or None), result[2]: total record count.
        result = [None, None, 0]

        if qname is None:
            qname = '0.0.0.0'
        if isinstance(qname, six.string_types):
            qname = dns.name.from_text(qname, None)

        def step(fun, *args, **kwargs):
            # Run one query step and merge its records into result.
            # Returns True only when the step produced records.
            try:
                a = fun(*args, **kwargs)
            except Exception as e:
                result[1] = e
                return False
            if a.rrset is not None and len(a.rrset):
                if result[0] is None:
                    result[0] = a
                else:
                    result[0].rrset.union_update(a.rrset)
                result[2] += len(a.rrset)
                return True

        def end():
            # Produce the final merged answer, or re-raise the recorded
            # failure when nothing was found.
            if result[0] is not None:
                if raise_on_no_answer and result[2] == 0:
                    raise dns.resolver.NoAnswer
                return result[0]
            if result[1] is not None:
                if raise_on_no_answer or not isinstance(result[1], dns.resolver.NoAnswer):
                    raise result[1]
            raise dns.resolver.NXDOMAIN(qnames=(qname,))

        # Step 1: hosts file (only meaningful for IN A/AAAA queries).
        if (self._hosts and (rdclass == dns.rdataclass.IN) and (rdtype in _hosts_rdtypes)):
            if step(self._hosts.query, qname, rdtype, raise_on_no_answer=False):
                if (result[0] is not None) or (result[1] is not None):
                    return end()

        # Main query
        step(self._resolver.query, qname, rdtype, rdclass, tcp, source, raise_on_no_answer=False)

        # `resolv.conf` docs say unqualified names must resolve from search (or local) domain.
        # However, common OS `getaddrinfo()` implementations append trailing dot (e.g. `db -> db.`)
        # and ask nameservers, as if top-level domain was queried.
        # This step follows established practice.
        # https://github.com/nameko/nameko/issues/392
        # https://github.com/eventlet/eventlet/issues/363
        if len(qname) == 1:
            step(self._resolver.query, qname.concatenate(dns.name.root),
                 rdtype, rdclass, tcp, source, raise_on_no_answer=False)

        return end()

    def getaliases(self, hostname):
        """Return a list of all the aliases of a given hostname"""
        if self._hosts:
            aliases = self._hosts.getaliases(hostname)
        else:
            aliases = []
        # Follow the CNAME chain, collecting each target along the way.
        while True:
            try:
                ans = self._resolver.query(hostname, dns.rdatatype.CNAME)
            except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
                break
            else:
                aliases.extend(str(rr.target) for rr in ans.rrset)
                hostname = ans[0].target
        return aliases
# Module-global resolver used by every convenience function below.
resolver = ResolverProxy(hosts_resolver=HostsResolver())
def resolve(name, family=socket.AF_INET, raises=True, _proxy=None):
    """Resolve *name* for *family* using the global resolver proxy.

    This method is called by the global getaddrinfo() function.

    Returns a dns.resolver.Answer instance.  When *raises* is False and
    there is no answer, the returned answer's rrset is empty instead of
    an exception being raised.
    """
    if family == socket.AF_INET:
        record_type = dns.rdatatype.A
    elif family == socket.AF_INET6:
        record_type = dns.rdatatype.AAAA
    else:
        raise socket.gaierror(socket.EAI_FAMILY,
                              'Address family not supported')
    proxy = resolver if _proxy is None else _proxy
    try:
        try:
            return proxy.query(name, record_type, raise_on_no_answer=raises)
        except dns.resolver.NXDOMAIN:
            if raises:
                # Falls through to the DNSException handler below.
                raise
            return HostsAnswer(dns.name.Name(name),
                               record_type, dns.rdataclass.IN, None, False)
    except dns.exception.Timeout:
        raise EAI_EAGAIN_ERROR
    except dns.exception.DNSException:
        raise EAI_NODATA_ERROR
def resolve_cname(host):
    """Return the canonical name of *host* (host itself when no CNAME)."""
    try:
        answer = resolver.query(host, dns.rdatatype.CNAME)
    except dns.resolver.NoAnswer:
        return host
    except dns.exception.Timeout:
        raise EAI_EAGAIN_ERROR
    except dns.exception.DNSException:
        raise EAI_NODATA_ERROR
    return str(answer[0].target)
def getaliases(host):
    """Return a list of aliases for the given hostname.

    dnspython exceptions are translated into socket.gaierror ones.  An
    empty list is returned when no aliases are available.
    """
    try:
        aliases = resolver.getaliases(host)
    except dns.exception.Timeout:
        raise EAI_EAGAIN_ERROR
    except dns.exception.DNSException:
        raise EAI_NODATA_ERROR
    return aliases
def _getaddrinfo_lookup(host, family, flags):
    """Resolve a hostname to a list of addresses

    Helper function for getaddrinfo.
    """
    if flags & socket.AI_NUMERICHOST:
        # Caller demanded a numeric address but handed us a hostname.
        raise EAI_NONAME_ERROR
    addrs = []
    if family == socket.AF_UNSPEC:
        err = None
        # Try IPv6 first, then IPv4, collecting whatever resolves.
        for qfamily in [socket.AF_INET6, socket.AF_INET]:
            try:
                answer = resolve(host, qfamily, False)
            except socket.gaierror as e:
                if e.errno not in (socket.EAI_AGAIN, EAI_NONAME_ERROR.errno, EAI_NODATA_ERROR.errno):
                    raise
                # Remember the soft failure; fatal only if nothing resolves.
                err = e
            else:
                if answer.rrset:
                    addrs.extend(rr.address for rr in answer.rrset)
        if err is not None and not addrs:
            raise err
    elif family == socket.AF_INET6 and flags & socket.AI_V4MAPPED:
        answer = resolve(host, socket.AF_INET6, False)
        if answer.rrset:
            addrs = [rr.address for rr in answer.rrset]
        if not addrs or flags & socket.AI_ALL:
            # Fall back to IPv4 answers mapped into IPv6 form.
            answer = resolve(host, socket.AF_INET, False)
            if answer.rrset:
                addrs = ['::ffff:' + rr.address for rr in answer.rrset]
    else:
        answer = resolve(host, family, False)
        if answer.rrset:
            addrs = [rr.address for rr in answer.rrset]
    return str(answer.qname), addrs
def getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0):
    """Replacement for Python's socket.getaddrinfo

    This does the A and AAAA lookups asynchronously after which it
    calls the OS' getaddrinfo(3) using the AI_NUMERICHOST flag.  This
    flag ensures getaddrinfo(3) does not use the network itself and
    allows us to respect all the other arguments like the native OS.
    """
    if isinstance(host, six.string_types):
        # IDNA-encode non-ASCII hostnames before resolution.
        host = host.encode('idna').decode('ascii')
    if host is not None and not is_ip_addr(host):
        qname, addrs = _getaddrinfo_lookup(host, family, flags)
    else:
        qname = host
        addrs = [host]
    # Force numeric resolution (we already resolved above) and strip
    # AI_CANONNAME -- the canonical name is filled in further down.
    aiflags = (flags | socket.AI_NUMERICHOST) & (0xffff ^ socket.AI_CANONNAME)
    res = []
    err = None
    for addr in addrs:
        try:
            ai = socket.getaddrinfo(addr, port, family,
                                    socktype, proto, aiflags)
        except socket.error as e:
            if flags & socket.AI_ADDRCONFIG:
                # Address family not configured locally; skip this address.
                err = e
                continue
            raise
        res.extend(ai)
    if not res:
        if err:
            raise err
        raise socket.gaierror(socket.EAI_NONAME, 'No address found')
    if flags & socket.AI_CANONNAME:
        if not is_ip_addr(qname):
            qname = resolve_cname(qname).encode('ascii').decode('idna')
        # Only the first tuple carries the canonical name, per convention.
        ai = res[0]
        res[0] = (ai[0], ai[1], ai[2], qname, ai[4])
    return res
def gethostbyname(hostname):
    """Replacement for Python's socket.gethostbyname"""
    if is_ipv4_addr(hostname):
        # Already a dotted-quad literal; nothing to resolve.
        return hostname
    answer = resolve(hostname)
    return answer[0].address
def gethostbyname_ex(hostname):
    """Replacement for Python's socket.gethostbyname_ex"""
    if is_ipv4_addr(hostname):
        return (hostname, [], [hostname])
    answer = resolve(hostname)
    aliases = getaliases(hostname)
    addresses = [record.address for record in answer.rrset]
    name = str(answer.qname)
    # Drop the trailing dot of the absolute DNS name.
    if name.endswith('.'):
        name = name[:-1]
    return (name, aliases, addresses)
def getnameinfo(sockaddr, flags):
    """Replacement for Python's socket.getnameinfo.

    Currently only supports IPv4.
    """
    try:
        host, port = sockaddr
    except (ValueError, TypeError):
        if not isinstance(sockaddr, tuple):
            del sockaddr  # to pass a stdlib test that is
            # hyper-careful about reference counts
            raise TypeError('getnameinfo() argument 1 must be a tuple')
        else:
            # must be ipv6 sockaddr, pretending we don't know how to resolve it
            raise EAI_NONAME_ERROR
    if (flags & socket.NI_NAMEREQD) and (flags & socket.NI_NUMERICHOST):
        # Conflicting flags.  Punt.
        raise EAI_NONAME_ERROR
    if is_ipv4_addr(host):
        # Reverse lookup: map the address back to a name via a PTR query.
        try:
            rrset = resolver.query(
                dns.reversename.from_address(host), dns.rdatatype.PTR)
            if len(rrset) > 1:
                raise socket.error('sockaddr resolved to multiple addresses')
            host = rrset[0].target.to_text(omit_final_dot=True)
        except dns.exception.Timeout:
            if flags & socket.NI_NAMEREQD:
                raise EAI_EAGAIN_ERROR
        except dns.exception.DNSException:
            if flags & socket.NI_NAMEREQD:
                raise EAI_NONAME_ERROR
    else:
        # Forward lookup of a hostname; with NI_NUMERICHOST the resolved
        # numeric address replaces the name.
        try:
            rrset = resolver.query(host)
            if len(rrset) > 1:
                raise socket.error('sockaddr resolved to multiple addresses')
            if flags & socket.NI_NUMERICHOST:
                host = rrset[0].address
        except dns.exception.Timeout:
            raise EAI_EAGAIN_ERROR
        except dns.exception.DNSException:
            # NOTE(review): unlike the other call sites, the (errno, message)
            # pair is passed as a single tuple argument here -- confirm this
            # is intentional.
            raise socket.gaierror(
                (socket.EAI_NODATA, 'No address associated with hostname'))
    if not (flags & socket.NI_NUMERICSERV):
        # Translate the numeric port into a service name.
        proto = (flags & socket.NI_DGRAM) and 'udp' or 'tcp'
        port = socket.getservbyport(port, proto)
    return (host, port)
def _net_read(sock, count, expiration):
    """coro friendly replacement for dns.query._net_read

    Read the specified number of bytes from sock.  Keep trying until we
    either get the desired amount, or we hit EOF.

    A Timeout exception will be raised if the operation is not completed
    by the expiration time.
    """
    s = b''
    while count > 0:
        try:
            n = sock.recv(count)
        except socket.timeout:
            # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
            if expiration - time.time() <= 0.0:
                raise dns.exception.Timeout
            # Bug fix: retry the recv.  Previously control fell through to
            # the code below, raising NameError on a first-iteration timeout
            # ('n' unbound) and double-counting the previous chunk otherwise.
            continue
        if n == b'':
            raise EOFError
        count = count - len(n)
        s = s + n
    return s
def _net_write(sock, data, expiration):
    """coro friendly replacement for dns.query._net_write

    Write all of *data* to the socket, retrying partial sends.

    A Timeout exception will be raised if the operation is not completed
    by the expiration time.
    """
    sent = 0
    total = len(data)
    while sent < total:
        try:
            sent += sock.send(data[sent:])
        except socket.timeout:
            # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
            if expiration - time.time() <= 0.0:
                raise dns.exception.Timeout
def udp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
        af=None, source=None, source_port=0, ignore_unexpected=False):
    """coro friendly replacement for dns.query.udp

    Return the response obtained after sending a query via UDP.

    @param q: the query
    @type q: dns.message.Message
    @param where: where to send the message
    @type where: string containing an IPv4 or IPv6 address
    @param timeout: The number of seconds to wait before the query times out.
    If None, the default, wait forever.
    @type timeout: float
    @param port: The port to which to send the message.  The default is 53.
    @type port: int
    @param af: the address family to use.  The default is None, which
    causes the address family to use to be inferred from the form of of where.
    If the inference attempt fails, AF_INET is used.
    @type af: int
    @rtype: dns.message.Message object
    @param source: source address.  The default is the IPv4 wildcard address.
    @type source: string
    @param source_port: The port from which to send the message.
    The default is 0.
    @type source_port: int
    @param ignore_unexpected: If True, ignore responses from unexpected
    sources.  The default is False.
    @type ignore_unexpected: bool"""
    wire = q.to_wire()
    if af is None:
        # Infer the address family from the address format.
        try:
            af = dns.inet.af_for_address(where)
        except:
            af = dns.inet.AF_INET
    if af == dns.inet.AF_INET:
        destination = (where, port)
        if source is not None:
            source = (source, source_port)
    elif af == dns.inet.AF_INET6:
        # IPv6 sockaddrs carry flowinfo and scope id as well.
        destination = (where, port, 0, 0)
        if source is not None:
            source = (source, source_port, 0, 0)
    s = socket.socket(af, socket.SOCK_DGRAM)
    s.settimeout(timeout)
    try:
        expiration = dns.query._compute_expiration(timeout)
        if source is not None:
            s.bind(source)
        try:
            s.sendto(wire, destination)
        except socket.timeout:
            # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
            if expiration - time.time() <= 0.0:
                raise dns.exception.Timeout
        while 1:
            try:
                (wire, from_address) = s.recvfrom(65535)
            except socket.timeout:
                # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
                if expiration - time.time() <= 0.0:
                    raise dns.exception.Timeout
            # UDP is connectionless: accept only replies that come from
            # the server we actually queried.
            if from_address == destination:
                break
            if not ignore_unexpected:
                raise dns.query.UnexpectedSource(
                    'got a response from %s instead of %s'
                    % (from_address, destination))
    finally:
        s.close()
    r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac)
    # Sanity check: the reply must actually answer our query.
    if not q.is_response(r):
        raise dns.query.BadResponse()
    return r
def tcp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
        af=None, source=None, source_port=0):
    """coro friendly replacement for dns.query.tcp

    Return the response obtained after sending a query via TCP.

    @param q: the query
    @type q: dns.message.Message object
    @param where: where to send the message
    @type where: string containing an IPv4 or IPv6 address
    @param timeout: The number of seconds to wait before the query times out.
    If None, the default, wait forever.
    @type timeout: float
    @param port: The port to which to send the message.  The default is 53.
    @type port: int
    @param af: the address family to use.  The default is None, which
    causes the address family to use to be inferred from the form of of where.
    If the inference attempt fails, AF_INET is used.
    @type af: int
    @rtype: dns.message.Message object
    @param source: source address.  The default is the IPv4 wildcard address.
    @type source: string
    @param source_port: The port from which to send the message.
    The default is 0.
    @type source_port: int"""
    wire = q.to_wire()
    if af is None:
        # Infer the address family from the address format.
        try:
            af = dns.inet.af_for_address(where)
        except:
            af = dns.inet.AF_INET
    if af == dns.inet.AF_INET:
        destination = (where, port)
        if source is not None:
            source = (source, source_port)
    elif af == dns.inet.AF_INET6:
        # IPv6 sockaddrs carry flowinfo and scope id as well.
        destination = (where, port, 0, 0)
        if source is not None:
            source = (source, source_port, 0, 0)
    s = socket.socket(af, socket.SOCK_STREAM)
    s.settimeout(timeout)
    try:
        expiration = dns.query._compute_expiration(timeout)
        if source is not None:
            s.bind(source)
        try:
            s.connect(destination)
        except socket.timeout:
            # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
            if expiration - time.time() <= 0.0:
                raise dns.exception.Timeout
        l = len(wire)
        # copying the wire into tcpmsg is inefficient, but lets us
        # avoid writev() or doing a short write that would get pushed
        # onto the net
        # DNS over TCP frames each message with a 16-bit length prefix.
        tcpmsg = struct.pack("!H", l) + wire
        _net_write(s, tcpmsg, expiration)
        # Read the reply's length prefix, then exactly that many bytes.
        ldata = _net_read(s, 2, expiration)
        (l,) = struct.unpack("!H", ldata)
        wire = _net_read(s, l, expiration)
    finally:
        s.close()
    r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac)
    # Sanity check: the reply must actually answer our query.
    if not q.is_response(r):
        raise dns.query.BadResponse()
    return r
def reset():
    """Rebuild the global resolver's state from its configuration file."""
    resolver.clear()


# Install our coro-friendly replacements for the tcp and udp query methods.
dns.query.tcp = tcp
dns.query.udp = udp
| 35.96393 | 101 | 0.615874 |
4506ae5029fd61a2a471f0b709d56c8ffe28d3c3 | 409 | py | Python | scripts/download_pin.py | CMUAbstract/POPT-HPCA21 | 53b5021846690d0f3445428c6380e877ecf7a10e | [
"MIT"
] | 8 | 2021-04-22T19:50:42.000Z | 2022-03-16T02:52:16.000Z | scripts/download_pin.py | CMUAbstract/POPT-HPCA21 | 53b5021846690d0f3445428c6380e877ecf7a10e | [
"MIT"
] | 2 | 2021-07-18T06:07:34.000Z | 2022-02-22T09:46:38.000Z | scripts/download_pin.py | CMUAbstract/POPT-HPCA21 | 53b5021846690d0f3445428c6380e877ecf7a10e | [
"MIT"
] | 5 | 2021-03-01T13:11:44.000Z | 2022-02-28T00:06:18.000Z | import os, subprocess
# Download and unpack Intel Pin 2.14 into ../pin-2.14.
#
# check_call() (unlike the previous bare subprocess.call, whose return
# codes were ignored) raises CalledProcessError if any step fails, so a
# failed download no longer lets the later extract/move steps run against
# missing files.  List-form argv also avoids going through the shell.
PIN_DIR = 'pin-2.14-71313-gcc.4.4.7-linux'
PIN_TARBALL = PIN_DIR + '.tar.gz'
PIN_URL = ('http://software.intel.com/sites/landingpage/pintool/downloads/'
           + PIN_TARBALL)
subprocess.check_call(['wget', PIN_URL])
subprocess.check_call(['tar', '-xvzf', PIN_TARBALL])
subprocess.check_call(['rm', '-rf', '../pin-2.14'])
subprocess.check_call(['mv', PIN_DIR, '../pin-2.14/'])
subprocess.check_call(['rm', PIN_TARBALL])
| 51.125 | 135 | 0.723716 |
22542236dbf3730f4eb00041731f9328167ca8b1 | 9,194 | py | Python | answer_selection/xnet_plus/hyperparam_grid_search.py | shashiongithub/Document-Modeling-with-External-Information | 8db8dc4ab2d9a49af6523742ce9580aa22e12c8e | [
"BSD-2-Clause"
] | null | null | null | answer_selection/xnet_plus/hyperparam_grid_search.py | shashiongithub/Document-Modeling-with-External-Information | 8db8dc4ab2d9a49af6523742ce9580aa22e12c8e | [
"BSD-2-Clause"
] | null | null | null | answer_selection/xnet_plus/hyperparam_grid_search.py | shashiongithub/Document-Modeling-with-External-Information | 8db8dc4ab2d9a49af6523742ce9580aa22e12c8e | [
"BSD-2-Clause"
] | null | null | null | ####################################
# Author: Ronald Cardenas
# Date: July 2017
# Project: Document Modeling with External Attention for Sentence Extraction
####################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('../../common')
import math
import os
import random
import sys
import time
import pdb
import argparse
import numpy as np
import tensorflow as tf
import subprocess as sp
#from tensorflow.python import debug as tf_debug
from data_utils import DataProcessor, BatchData
from my_flags import FLAGS
from my_model import MY_Model
from model_docsum import accuracy_qas_top, mrr_metric
from train_test_utils import batch_predict_with_a_model, batch_load_data
from sklearn.model_selection import ParameterSampler, ParameterGrid
from scipy.stats.distributions import expon
from scipy.stats import lognorm, uniform
seed = 42
np.random.seed(seed)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Rutine for sweeping through hyper-parameters setups for the original sidenet')
parser.add_argument('gpu',help='gpu id',type=str,default="0")
parser.add_argument('dataset',help='Dataset to use / mode of FLAGS setup',type=str,default="newsqa")
parser.add_argument('file_suffix',help='Suffix for exp name',type=str,default="")
args = parser.parse_args()
FLAGS.data_mode = args.dataset
FLAGS.gpu_id = args.gpu
FLAGS.train_dir = os.path.abspath("./train_dir_" + args.dataset+"_subs")
FLAGS.train_epoch_crossentropy = 50
if args.dataset=='wikiqa':
FLAGS.use_subsampled_dataset = False
FLAGS.max_sent_length =100
FLAGS.max_doc_length = 30
elif args.dataset=='newsqa':
FLAGS.use_subsampled_dataset = True
FLAGS.max_sent_length = 50
FLAGS.max_doc_length = 64
elif args.dataset=='squad':
FLAGS.use_subsampled_dataset = True
FLAGS.max_sent_length = 80
FLAGS.max_doc_length = 16
FLAGS.pretrained_wordembedding_orgdata = os.path.expanduser("../datasets/word_emb/1-billion-word-language-modeling-benchmark-r13output.word2vec.vec")
FLAGS.preprocessed_data_directory = os.path.expanduser("../datasets/preprocessed_data")
FLAGS.force_reading = False
FLAGS.max_filter_length = 8
FLAGS.min_filter_length = 5
FLAGS.norm_extra_feats = True
FLAGS.max_gradient_norm = -1
FLAGS.decorrelate_extra_feats = False
# set sentence, doc length to maximum
sp.Popen(["mkdir","-p","tunning_"+FLAGS.data_mode])
output = open("tunning_"+FLAGS.data_mode+"/"+FLAGS.data_mode + "_hp_grid_tuning_%s.txt" % args.file_suffix,'w')
print("Reading vocabulary...")
vocab_dict, word_embedding_array = DataProcessor().prepare_vocab_embeddingdict()
print("Reading training set...")
train_data = DataProcessor().prepare_news_data(vocab_dict, data_type="training") # subsampled
# data in whole batch with padded matrixes
print("Reading validation set...")
val_batch = batch_load_data(DataProcessor().prepare_news_data(vocab_dict,
data_type="validation",
normalizer=train_data.normalizer,
pca_model=train_data.pca_model))
setup_by_id = {}
results_by_id = {}
results_by_id_mrr = {}
setup_id = 0
best_global_acc = -1
best_global_mrr = -1
best_setup_id = -1
best_setup_id_mrr = -1
parameter_grid = {
"batch_size" : [20],
"learning_rate" : [math.exp(-4.605170185988091)],
"size":[1833],
"sentembed_size":[211],
"use_dropout":[True],
"dropout": [0.65,0.8,1.0]
}
## loop for hyperparams
param_gen = ParameterGrid(parameter_grid)
for setup in param_gen:
setup_time = time.time()
setup_by_id[setup_id] = setup
FLAGS.batch_size = setup["batch_size"]
FLAGS.learning_rate = setup["learning_rate"]
FLAGS.size = setup["size"]
FLAGS.sentembed_size = setup["sentembed_size"]
FLAGS.use_dropout = setup["use_dropout"]
FLAGS.dropout = setup["dropout"]
prev_drpt = FLAGS.use_dropout
# check if concat, then adjust sentemb size
fil_lens_to_test = FLAGS.max_filter_length - FLAGS.min_filter_length + 1
if FLAGS.handle_filter_output == "concat" and FLAGS.sentembed_size%fil_lens_to_test != 0:
q = int(FLAGS.sentembed_size // fil_lens_to_test)
FLAGS.sentembed_size = q * fil_lens_to_test
print("Setup ",setup_id,": ",setup)
output.write("Setup %d: %s\n" % (setup_id,str(setup)))
best_acc = -1
best_mrr = -1
best_ep = 0
best_ep_mrr = 0
with tf.Graph().as_default() and tf.device('/gpu:'+FLAGS.gpu_id):
config = tf.ConfigProto(allow_soft_placement = True)
tf.set_random_seed(seed)
with tf.Session(config = config) as sess:
model = MY_Model(sess, len(v4ocab_dict)-2)
init_epoch = 1
sess.run(model.vocab_embed_variable.assign(word_embedding_array))
for epoch in range(init_epoch, FLAGS.train_epoch_crossentropy+1):
ep_time = time.time() # to check duration
train_data.shuffle_fileindices()
total_loss = 0
# Start Batch Training
step = 1
while (step * FLAGS.batch_size) <= len(train_data.fileindices):
# Get batch data as Numpy Arrays
batch = train_data.get_batch(((step-1)*FLAGS.batch_size), (step * FLAGS.batch_size))
# Run optimizer: optimize policy and reward estimator
_,ce_loss = sess.run([model.train_op_policynet_withgold,
model.cross_entropy_loss],
feed_dict={model.document_placeholder: batch.docs,
model.label_placeholder: batch.labels,
model.weight_placeholder: batch.weights,
model.isf_score_placeholder: batch.isf_score,
model.idf_score_placeholder: batch.idf_score,
model.locisf_score_placeholder: batch.locisf_score})
total_loss += ce_loss
# Increase step
if step%500==0:
print ("\tStep: ",step)
step += 1
#END-WHILE-TRAINING
total_loss /= step
FLAGS.authorise_gold_label = False
FLAGS.use_dropout = False
# retrieve batch with updated logits in it
val_batch = batch_predict_with_a_model(val_batch, "validation", model, session=sess)
FLAGS.authorise_gold_label = True
FLAGS.use_dropout = prev_drpt
probs = sess.run(model.predictions,feed_dict={model.logits_placeholder: val_batch.logits})
validation_acc = accuracy_qas_top(probs, val_batch.labels, val_batch.weights, val_batch.isf_score_ids)
val_mrr = mrr_metric(probs, val_batch.labels, val_batch.weights, val_batch.isf_score_ids,"validation")
print("\tEpoch %2d || Train ce_loss: %4.3f || Val acc: %.4f || Val mrr: %.4f || duration: %3.2f" %
(epoch,total_loss,validation_acc,val_mrr,time.time()-ep_time))
output.write("\tEpoch %2d || Train ce_loss: %4.3f || Val acc: %.4f || Val mrr: %.4f || duration: %3.2f\n" %
(epoch,total_loss,validation_acc,val_mrr,time.time()-ep_time))
if validation_acc > best_acc:
best_acc = validation_acc
best_ep = epoch
if val_mrr > best_mrr:
best_mrr = val_mrr
best_ep_mrr = epoch
#break # for time testing
#END-FOR-EPOCH
results_by_id[setup_id] = (best_acc,best_ep)
results_by_id_mrr[setup_id] = (best_mrr,best_ep_mrr)
if best_acc > best_global_acc:
best_global_acc = best_acc
best_setup_id = setup_id
if best_mrr > best_global_mrr:
best_global_mrr = best_mrr
best_setup_id_mrr = setup_id
# clear graph
tf.reset_default_graph()
#END-GRAPH
print("Best ACC result in this setup:",results_by_id[setup_id])
print("Best MRR result in this setup:",results_by_id_mrr[setup_id])
print("Duration: %.4fsec" % (time.time()-setup_time))
output.write("Best acc result in this setup: %.6f,%d\n" % (best_acc,best_ep))
output.write("Best mrr result in this setup: %.6f,%d\n" % (best_mrr,best_ep_mrr))
output.write("Duration: %.4fsec\n" % (time.time()-setup_time))
setup_id += 1
#END-FOR-PARAMS
print("Best acc setup: ",setup_by_id[best_setup_id])
print(" Acc: %.4f | Epoch: %d" % results_by_id[best_setup_id])
print("Best mrr setup: ",setup_by_id_mrr[best_setup_id_mrr])
print(" MRR: %.4f | Epoch: %d" % results_by_id_mrr[best_setup_id_mrr])
output.write("Best acc setup: " + str(setup_by_id[best_setup_id]) + "\n")
output.write(" Acc: %.4f | Epoch: %d\n" % results_by_id[best_setup_id])
output.write("Best mrr setup: " + str(setup_by_id[best_setup_id_mrr]) + "\n")
output.write(" MRR: %.4f | Epoch: %d\n" % results_by_id_mrr[best_setup_id_mrr])
output.close()
| 40.681416 | 151 | 0.654992 |
9943c6c73edbfc8684845122f6fcc7831819ca02 | 245 | py | Python | deepmux/errors.py | Deep-Mux/deepmux-cli | ff147259ffb1b0bef613f9b15e4e029fb859d797 | [
"MIT"
] | 4 | 2020-11-23T18:56:25.000Z | 2021-03-19T23:38:24.000Z | deepmux/errors.py | Deep-Mux/deepmux-cli | ff147259ffb1b0bef613f9b15e4e029fb859d797 | [
"MIT"
] | null | null | null | deepmux/errors.py | Deep-Mux/deepmux-cli | ff147259ffb1b0bef613f9b15e4e029fb859d797 | [
"MIT"
] | null | null | null | class DeepmuxCLIError(Exception):
...
class UnknownException(DeepmuxCLIError):
...
class LoginRequired(DeepmuxCLIError):
...
class NameConflict(DeepmuxCLIError): # 409
...
class NotFound(DeepmuxCLIError): # 404
...
| 12.894737 | 43 | 0.669388 |
796dd11a57c9a76e7966ae02bad8e4373ed92d1e | 720 | py | Python | Exercicios Python/ex084.py | ClaudioSiqueira/Exercicios-Python | 128387769b34b7d42aee5c1effda16de21216e10 | [
"MIT"
] | null | null | null | Exercicios Python/ex084.py | ClaudioSiqueira/Exercicios-Python | 128387769b34b7d42aee5c1effda16de21216e10 | [
"MIT"
] | null | null | null | Exercicios Python/ex084.py | ClaudioSiqueira/Exercicios-Python | 128387769b34b7d42aee5c1effda16de21216e10 | [
"MIT"
] | null | null | null | cont = 0
maior = menor = 0
maior_nome = []
menor_nome = []
while True:
name = input('Nome: ')
weight = float(input('Peso : '))
if cont == 0:
menor = maior = weight
elif cont >= 1:
if weight > maior:
maior = weight
maior_nome.append(name)
if weight < menor:
menor = weight
menor_nome.clear()
menor_nome.append(name)
answer = input('Quer continuar ? [S/N] ').upper()
cont += 1
if answer == 'N':
break
print('Ao todo, você cadastrou {} pessoas'.format(cont))
print('O maior peso foi de {}. Peso de {}'.format(maior, maior_nome))
print('O menor peso foi de {}. Peso de {}'.format(menor, menor_nome))
| 24.827586 | 69 | 0.552778 |
f356aded41431a94e363764e421250274c1ed053 | 8,465 | py | Python | inventory/views.py | shoaibsaikat/Django-Office-Management | 952aa44c2d3c2f99e91c2ed1aada17ee15fc9eb0 | [
"Apache-2.0"
] | null | null | null | inventory/views.py | shoaibsaikat/Django-Office-Management | 952aa44c2d3c2f99e91c2ed1aada17ee15fc9eb0 | [
"Apache-2.0"
] | null | null | null | inventory/views.py | shoaibsaikat/Django-Office-Management | 952aa44c2d3c2f99e91c2ed1aada17ee15fc9eb0 | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib import messages
from django.http import HttpResponse, JsonResponse
from django.urls import reverse_lazy
from django.views.generic import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView
from django.views import View
from django.views.decorators.csrf import csrf_protect
from django.shortcuts import render, redirect
from django.db import transaction
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core import serializers
from datetime import datetime
from . import forms
from . import models
import json
import logging
logger = logging.getLogger(__name__)
PAGE_COUNT = 10
def get_paginated_date(page, list, count):
    """Return page *page* of *list*, with *count* items per page.

    Falls back to the first page when the page number is not an integer,
    and to the last page when the requested page is out of range.
    """
    paginator = Paginator(list, count)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)
class InventoryListView(LoginRequiredMixin, UserPassesTestMixin, ListView):
    """Paginated list of all inventory items (managers only)."""
    model = models.Inventory
    paginate_by = PAGE_COUNT
    def test_func(self):
        # Visible only to users who can distribute or approve inventory.
        return self.request.user.profile.canDistributeInventory or self.request.user.profile.canApproveInventory
class InventoryCreateView(LoginRequiredMixin, UserPassesTestMixin, CreateView):
    """Form view to add a new inventory item; redirects to the list on success."""
    login_url = '/user/signin/'
    redirect_field_name = 'redirect_to'
    model = models.Inventory
    fields = ['name', 'description', 'unit', 'count']
    success_url = reverse_lazy('inventory:list')
    def test_func(self):
        # Only inventory managers (distributors or approvers) may create items.
        return self.request.user.profile.canDistributeInventory or self.request.user.profile.canApproveInventory
class InventoryUpdateView(LoginRequiredMixin, UserPassesTestMixin, View):
    """Display and process the edit form for a single inventory item."""
    def get(self, request, *args, **kwargs):
        item = models.Inventory.objects.get(pk=kwargs['pk'])
        return render(request, 'inventory/inventory_update_form.html', {'form': item})
    def post(self, request, *args, **kwargs):
        item = models.Inventory.objects.get(pk=kwargs['pk'])
        # Copy the three editable fields straight from the POST payload.
        for field in ('description', 'unit', 'count'):
            setattr(item, field, self.request.POST[field])
        item.save()
        messages.success(request, "Information updated!")
        return render(request, 'inventory/inventory_update_form.html', {'form': item})
    def test_func(self):
        return self.request.user.profile.canDistributeInventory or self.request.user.profile.canApproveInventory
class RequisitionCreateView(LoginRequiredMixin, CreateView):
    """Form view for any signed-in user to file a new requisition."""
    model = models.Requisition
    form_class = forms.RequisitionForm
    success_url = reverse_lazy('index')
    def form_valid(self, form):
        # Stamp the requisition with the requesting user before saving.
        form.instance.user = self.request.user
        return super().form_valid(form)
class MyRequisitionListView(LoginRequiredMixin, View):
    """List the signed-in user's own requisitions, newest first."""
    def get(self, request, *args, **kwargs):
        own_requisitions = models.Requisition.objects.filter(user=self.request.user).order_by('-pk')
        current_page = request.GET.get('page', 1)
        page_of_requisitions = get_paginated_date(current_page, own_requisitions, PAGE_COUNT)
        return render(request, 'inventory/requisition_personal_list.html', {'object_list': page_of_requisitions})
class RequisitionListView(LoginRequiredMixin, UserPassesTestMixin, View):
    """Pending-approval queue for the signed-in approver.

    GET lists this approver's unapproved requisitions; POST approves one,
    optionally assigning a distributor chosen in the dropdown.
    """
    def get(self, request, *args, **kwargs):
        requisitionList = models.Requisition.objects.filter(approver=self.request.user, approved=False).order_by('-pk')
        # pagination
        page = request.GET.get('page', 1)
        requisitions = get_paginated_date(page, requisitionList, PAGE_COUNT)
        # generating distributor list for dropdown
        users = models.User.objects.all()
        return render(request, 'inventory/requisition_list.html', {'object_list': requisitions, 'distributor_list': users})
    def post(self, request, *args, **kwargs):
        # NOTE(review): .first() returns None for a stale pk, which would make
        # the assignments below raise AttributeError — confirm intended.
        requisition = models.Requisition.objects.filter(pk=request.POST['pk']).first()
        requisition.approved = True
        # Distributor is optional in the form; skip assignment when absent.
        if request.POST.get('distributor', False):
            requisition.distributor = models.User.objects.filter(pk=request.POST['distributor']).first()
        requisition.approveDate = datetime.now()
        requisition.save()
        return redirect('inventory:requisition_list')
    def test_func(self):
        # Only approvers can see or act on this queue.
        return self.request.user.profile.canApproveInventory
class RequisitionDetailFormView(LoginRequiredMixin, UserPassesTestMixin, View):
    """Detail page for one pending requisition, with an approve form.

    POST duplicates the approval logic of RequisitionListView.post.
    """
    def get(self, request, *args, **kwargs):
        requisition = models.Requisition.objects.filter(pk=kwargs['pk'], approver=self.request.user, approved=False).first()
        users = models.User.objects.all()
        return render(request, 'inventory/requisition_detail_form.html', {'requisition': requisition, 'users': users})
    def post(self, request, *args, **kwargs):
        # logger.warning('distributor: {}'.format(request.POST['distributor']))
        requisition = models.Requisition.objects.filter(pk=kwargs['pk']).first()
        requisition.approved = True
        # Distributor is optional; only assign when the form supplied one.
        if request.POST.get('distributor', False):
            requisition.distributor = models.User.objects.filter(pk=request.POST['distributor']).first()
        requisition.approveDate = datetime.now()
        requisition.save()
        return redirect('inventory:requisition_list')
    def test_func(self):
        return self.request.user.profile.canDistributeInventory or self.request.user.profile.canApproveInventory
class RequisitionDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):
    """Read-only detail page for a requisition (managers only)."""
    model = models.Requisition
    def test_func(self):
        return self.request.user.profile.canDistributeInventory or self.request.user.profile.canApproveInventory
class RequisitionApprovedListView(LoginRequiredMixin, UserPassesTestMixin, View):
    """Queue of approved-but-undistributed requisitions for this distributor."""
    def get(self, request, *args, **kwargs):
        requisitionList = models.Requisition.objects.filter(distributor=self.request.user, distributed=False).order_by('-pk')
        # pagination
        page = request.GET.get('page', 1)
        requisitions = get_paginated_date(page, requisitionList, PAGE_COUNT)
        return render(request, 'inventory/requisition_approved_list.html', {'object_list': requisitions})
    def test_func(self):
        # Only distributors handle the hand-out step.
        return self.request.user.profile.canDistributeInventory
class RequisitionHistoryList(LoginRequiredMixin, UserPassesTestMixin, ListView):
    """Paginated history of every requisition (managers only)."""
    model = models.Requisition
    paginate_by = PAGE_COUNT
    # NOTE(review): this `ordering` is shadowed by get_queryset() below,
    # which orders by '-pk' instead of '-requestDate' — confirm intended.
    ordering = ['-requestDate']
    template_name = 'inventory/requisition_history.html'
    def get_queryset(self):
        return models.Requisition.objects.all().order_by('-pk')
    def test_func(self):
        return self.request.user.profile.canDistributeInventory or self.request.user.profile.canApproveInventory
@csrf_protect
@login_required
@user_passes_test(lambda u: u.profile.canDistributeInventory)
@transaction.atomic
def requisitionDistribution(request, pk):
    """Hand out an approved requisition and deduct the stock.

    Runs inside a transaction so the requisition update and the inventory
    decrement either both commit or both roll back. When stock is too low,
    nothing is changed and an error message is flashed instead.
    """
    requisition = models.Requisition.objects.filter(pk=pk).first()
    inventory = models.Inventory.objects.filter(pk=requisition.inventory.pk).first()
    if inventory.count < requisition.amount:
        messages.error(request, 'Distribution failed! Inventory low, please add items to the inventory first')
    else:
        requisition.distributed = True
        requisition.distributionDate = datetime.now()
        inventory.count = inventory.count - requisition.amount
        requisition.save()
        inventory.save()
    return redirect('inventory:requisition_approved_list')
@csrf_protect
@login_required
@user_passes_test(lambda u: u.profile.canDistributeInventory or u.profile.canApproveInventory)
def inventoryQuickEdit(request, pk, amount):
    """Set the stock count of item *pk* to *amount*, then return to the list."""
    item = models.Inventory.objects.get(pk=pk)
    item.count = amount
    item.save()
    messages.success(request, item.name + ' updated!')
    return redirect('inventory:list')
def is_ajax(request):
    """Return True when the request carries the XMLHttpRequest (AJAX) marker header."""
    requested_with = request.META.get('HTTP_X_REQUESTED_WITH')
    return requested_with == 'XMLHttpRequest'
def getInventoryList(request):
    """AJAX endpoint returning every inventory item serialized as JSON.

    Responds 200 with the serialized queryset for AJAX GETs, 400 otherwise.
    NOTE(review): unlike the other views this has no login/permission
    decorator — confirm whether unauthenticated access is intended.
    """
    if is_ajax(request) and request.method == 'GET':
        list = models.Inventory.objects.all()
        return JsonResponse({'inventory_list': serializers.serialize('json', list)}, status = 200)
    return JsonResponse({}, status = 400)
| 41.495098 | 125 | 0.733373 |
96c5a2dd53872d328bedd7c098ba65cf9739ccbb | 22,677 | py | Python | nipype/interfaces/fsl/tests/test_preprocess.py | lighthall-lab/nipype-legacy | 6c23846aa50c2ce34653f9517d95f02b071dc52d | [
"Apache-2.0"
] | 2 | 2019-01-25T18:20:51.000Z | 2019-07-30T20:51:51.000Z | nipype/interfaces/fsl/tests/test_preprocess.py | lighthall-lab/nipype-legacy | 6c23846aa50c2ce34653f9517d95f02b071dc52d | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/fsl/tests/test_preprocess.py | lighthall-lab/nipype-legacy | 6c23846aa50c2ce34653f9517d95f02b071dc52d | [
"Apache-2.0"
] | 2 | 2018-01-25T19:48:17.000Z | 2019-01-25T18:20:52.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from builtins import str
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from builtins import open, open
import os
import tempfile
from copy import deepcopy
import pytest
from nipype.utils.filemanip import split_filename, filename_to_list
from .. import preprocess as fsl
from nipype.interfaces.fsl import Info
from nipype.interfaces.base import File, TraitError, Undefined, isdefined
from nipype.interfaces.fsl import no_fsl
def fsl_name(obj, fname):
    """Append the file extension matching *obj*'s FSL output type to *fname*."""
    extension = Info.output_type_to_ext(obj.inputs.output_type)
    return fname + extension
@pytest.fixture()
def setup_infile(tmpdir):
    """Create an empty image file with the default FSL extension.

    Returns (path-to-file, directory). FIX: the original called open() without
    closing it, leaking a file handle for the duration of the test session.
    """
    ext = Info.output_type_to_ext(Info.output_type())
    tmp_dir = str(tmpdir)
    tmp_infile = os.path.join(tmp_dir, 'foo' + ext)
    with open(tmp_infile, 'w'):
        pass
    return (tmp_infile, tmp_dir)
@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
def test_bet(setup_infile):
    """Check BET's command name, generated/explicit output names, input
    validation, and the flag rendering of every supported option."""
    tmp_infile, tp_dir = setup_infile
    better = fsl.BET()
    assert better.cmd == 'bet'
    # Test raising error with mandatory args absent
    with pytest.raises(ValueError):
        better.run()
    # Test generated outfile name
    better.inputs.in_file = tmp_infile
    outfile = fsl_name(better, 'foo_brain')
    outpath = os.path.join(os.getcwd(), outfile)
    realcmd = 'bet %s %s' % (tmp_infile, outpath)
    assert better.cmdline == realcmd
    # Test specified outfile name
    outfile = fsl_name(better, '/newdata/bar')
    better.inputs.out_file = outfile
    realcmd = 'bet %s %s' % (tmp_infile, outfile)
    assert better.cmdline == realcmd
    # infile foo.nii doesn't exist
    def func():
        better.run(in_file='foo2.nii', out_file='bar.nii')
    with pytest.raises(TraitError):
        func()
    # Our options and some test values for them
    # Should parallel the opt_map structure in the class for clarity
    opt_map = {
        'outline': ('-o', True),
        'mask': ('-m', True),
        'skull': ('-s', True),
        'no_output': ('-n', True),
        'frac': ('-f 0.40', 0.4),
        'vertical_gradient': ('-g 0.75', 0.75),
        'radius': ('-r 20', 20),
        'center': ('-c 54 75 80', [54, 75, 80]),
        'threshold': ('-t', True),
        'mesh': ('-e', True),
        'surfaces': ('-A', True)
        # 'verbose': ('-v', True),
        # 'flags': ('--i-made-this-up', '--i-made-this-up'),
    }
    # Currently we don't test -R, -S, -B, -Z, -F, -A or -A2
    # test each of our arguments
    better = fsl.BET()
    outfile = fsl_name(better, 'foo_brain')
    outpath = os.path.join(os.getcwd(), outfile)
    for name, settings in list(opt_map.items()):
        better = fsl.BET(**{name: settings[1]})
        # Add mandatory input
        better.inputs.in_file = tmp_infile
        realcmd = ' '.join([better.cmd, tmp_infile, outpath, settings[0]])
        assert better.cmdline == realcmd
# test fast
@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
def test_fast(setup_infile):
    """Check FAST's default inputs, the -S file-count flag for one vs. two
    input files, and the flag rendering of every supported option."""
    tmp_infile, tp_dir = setup_infile
    faster = fsl.FAST()
    faster.inputs.verbose = True
    fasted = fsl.FAST(in_files=tmp_infile, verbose=True)
    fasted2 = fsl.FAST(in_files=[tmp_infile, tmp_infile], verbose=True)
    assert faster.cmd == 'fast'
    assert faster.inputs.verbose == True
    assert faster.inputs.manual_seg == Undefined
    assert faster.inputs != fasted.inputs
    assert fasted.cmdline == 'fast -v -S 1 %s' % (tmp_infile)
    assert fasted2.cmdline == 'fast -v -S 2 %s %s' % (tmp_infile, tmp_infile)
    faster = fsl.FAST()
    faster.inputs.in_files = tmp_infile
    assert faster.cmdline == 'fast -S 1 %s' % (tmp_infile)
    faster.inputs.in_files = [tmp_infile, tmp_infile]
    assert faster.cmdline == 'fast -S 2 %s %s' % (tmp_infile, tmp_infile)
    # Our options and some test values for them
    # Should parallel the opt_map structure in the class for clarity
    opt_map = {'number_classes': ('-n 4', 4),
               'bias_iters': ('-I 5', 5),
               'bias_lowpass': ('-l 15', 15),
               'img_type': ('-t 2', 2),
               'init_seg_smooth': ('-f 0.035', 0.035),
               'segments': ('-g', True),
               'init_transform': ('-a %s' % (tmp_infile), '%s' % (tmp_infile)),
               'other_priors': ('-A %s %s %s' % (tmp_infile, tmp_infile,
                                                 tmp_infile),
                                (['%s' % (tmp_infile),
                                  '%s' % (tmp_infile),
                                  '%s' % (tmp_infile)])),
               'no_pve': ('--nopve', True),
               'output_biasfield': ('-b', True),
               'output_biascorrected': ('-B', True),
               'no_bias': ('-N', True),
               'out_basename': ('-o fasted', 'fasted'),
               'use_priors': ('-P', True),
               'segment_iters': ('-W 14', 14),
               'mixel_smooth': ('-R 0.25', 0.25),
               'iters_afterbias': ('-O 3', 3),
               'hyper': ('-H 0.15', 0.15),
               'verbose': ('-v', True),
               'manual_seg': ('-s %s' % (tmp_infile), '%s' % (tmp_infile)),
               'probability_maps': ('-p', True),
               }
    # test each of our arguments
    for name, settings in list(opt_map.items()):
        faster = fsl.FAST(in_files=tmp_infile, **{name: settings[1]})
        assert faster.cmdline == ' '.join([faster.cmd, settings[0],
                                           "-S 1 %s" % tmp_infile])
@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
def test_fast_list_outputs(setup_infile):
    ''' By default (no -o), FSL's fast command outputs files into the same
    directory as the input files. If the flag -o is set, it outputs files into
    the cwd '''
    def _run_and_test(opts, output_base):
        # Every listed output path must live under output_base.
        outputs = fsl.FAST(**opts)._list_outputs()
        for output in outputs.values():
            if output:
                for filename in filename_to_list(output):
                    assert os.path.realpath(filename).startswith(os.path.realpath(output_base))
    # set up
    # NOTE(review): os.chdir here is never restored, so later tests run from
    # this temp directory — confirm that is acceptable for this suite.
    tmp_infile, indir = setup_infile
    cwd = tempfile.mkdtemp()
    os.chdir(cwd)
    assert indir != cwd
    out_basename = 'a_basename'
    # run and test
    opts = {'in_files': tmp_infile}
    input_path, input_filename, input_ext = split_filename(tmp_infile)
    _run_and_test(opts, os.path.join(input_path, input_filename))
    opts['out_basename'] = out_basename
    _run_and_test(opts, os.path.join(cwd, out_basename))
@pytest.fixture()
def setup_flirt(tmpdir):
    """Create two empty temp image files with the default FSL extension.

    Returns (directory, infile-path, reffile-path). FIX: mkstemp returns an
    OPEN file descriptor as its first element; the original discarded it,
    leaking two descriptors per test — they are closed here.
    """
    ext = Info.output_type_to_ext(Info.output_type())
    tmp_dir = str(tmpdir)
    fd_in, infile = tempfile.mkstemp(suffix=ext, dir=tmp_dir)
    fd_ref, reffile = tempfile.mkstemp(suffix=ext, dir=tmp_dir)
    os.close(fd_in)
    os.close(fd_ref)
    return (tmp_dir, infile, reffile)
@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
def test_flirt(setup_flirt):
    """Check FLIRT command-line generation: explicit and auto-generated
    output names, mandatory-input errors, apply_xfm variants, every input
    trait's flag rendering, and _list_outputs paths."""
    # setup
    tmpdir, infile, reffile = setup_flirt
    flirter = fsl.FLIRT()
    assert flirter.cmd == 'flirt'
    flirter.inputs.bins = 256
    flirter.inputs.cost = 'mutualinfo'
    flirted = fsl.FLIRT(in_file=infile, reference=reffile,
                        out_file='outfile', out_matrix_file='outmat.mat',
                        bins=256,
                        cost='mutualinfo')
    flirt_est = fsl.FLIRT(in_file=infile, reference=reffile,
                          out_matrix_file='outmat.mat',
                          bins=256,
                          cost='mutualinfo')
    assert flirter.inputs != flirted.inputs
    assert flirted.inputs != flirt_est.inputs
    assert flirter.inputs.bins == flirted.inputs.bins
    assert flirter.inputs.cost == flirt_est.inputs.cost
    realcmd = 'flirt -in %s -ref %s -out outfile -omat outmat.mat ' \
              '-bins 256 -cost mutualinfo' % (infile, reffile)
    assert flirted.cmdline == realcmd
    flirter = fsl.FLIRT()
    # infile not specified
    with pytest.raises(ValueError):
        flirter.cmdline
    flirter.inputs.in_file = infile
    # reference not specified
    with pytest.raises(ValueError):
        flirter.cmdline
    flirter.inputs.reference = reffile
    # Generate outfile and outmatrix
    pth, fname, ext = split_filename(infile)
    outfile = fsl_name(flirter, '%s_flirt' % fname)
    outmat = '%s_flirt.mat' % fname
    realcmd = 'flirt -in %s -ref %s -out %s -omat %s' % (infile, reffile,
                                                         outfile, outmat)
    assert flirter.cmdline == realcmd
    # test apply_xfm option
    axfm = deepcopy(flirter)
    axfm.inputs.apply_xfm = True
    # in_matrix_file or uses_qform must be defined
    with pytest.raises(RuntimeError): axfm.cmdline
    axfm2 = deepcopy(axfm)
    # test uses_qform
    axfm.inputs.uses_qform = True
    assert axfm.cmdline == (realcmd + ' -applyxfm -usesqform')
    # test in_matrix_file
    axfm2.inputs.in_matrix_file = reffile
    assert axfm2.cmdline == (realcmd + ' -applyxfm -init %s' % reffile)
    # NOTE(review): the mkstemp file descriptor is discarded here and never
    # closed — consider os.close()-ing it as done in setup fixtures.
    _, tmpfile = tempfile.mkstemp(suffix='.nii', dir=tmpdir)
    # Loop over all inputs, set a reasonable value and make sure the
    # cmdline is updated correctly.
    for key, trait_spec in sorted(fsl.FLIRT.input_spec().traits().items()):
        # Skip mandatory inputs and the trait methods
        if key in ('trait_added', 'trait_modified', 'in_file', 'reference',
                   'environ', 'output_type', 'out_file', 'out_matrix_file',
                   'in_matrix_file', 'apply_xfm', 'ignore_exception',
                   'terminal_output', 'out_log', 'save_log'):
            continue
        param = None
        value = None
        if key == 'args':
            param = '-v'
            value = '-v'
        elif isinstance(trait_spec.trait_type, File):
            value = tmpfile
            param = trait_spec.argstr % value
        elif trait_spec.default is False:
            param = trait_spec.argstr
            value = True
        elif key in ('searchr_x', 'searchr_y', 'searchr_z'):
            value = [-45, 45]
            param = trait_spec.argstr % ' '.join(str(elt) for elt in value)
        else:
            value = trait_spec.default
            param = trait_spec.argstr % value
        cmdline = 'flirt -in %s -ref %s' % (infile, reffile)
        # Handle autogeneration of outfile
        pth, fname, ext = split_filename(infile)
        outfile = fsl_name(fsl.FLIRT(), '%s_flirt' % fname)
        outfile = ' '.join(['-out', outfile])
        # Handle autogeneration of outmatrix
        outmatrix = '%s_flirt.mat' % fname
        outmatrix = ' '.join(['-omat', outmatrix])
        # Build command line
        cmdline = ' '.join([cmdline, outfile, outmatrix, param])
        flirter = fsl.FLIRT(in_file=infile, reference=reffile)
        setattr(flirter.inputs, key, value)
        assert flirter.cmdline == cmdline
    # Test OutputSpec
    flirter = fsl.FLIRT(in_file=infile, reference=reffile)
    pth, fname, ext = split_filename(infile)
    flirter.inputs.out_file = ''.join(['foo', ext])
    flirter.inputs.out_matrix_file = ''.join(['bar', ext])
    outs = flirter._list_outputs()
    assert outs['out_file'] == \
        os.path.join(os.getcwd(), flirter.inputs.out_file)
    assert outs['out_matrix_file'] == \
        os.path.join(os.getcwd(), flirter.inputs.out_matrix_file)
# Mcflirt
@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
def test_mcflirt(setup_flirt):
    """Check MCFLIRT's generated/explicit output names, the per-option flag
    rendering (with its option-dependent argument ordering), and the error
    raised when mandatory inputs are missing."""
    tmpdir, infile, reffile = setup_flirt
    frt = fsl.MCFLIRT()
    assert frt.cmd == 'mcflirt'
    # Test generated outfile name
    frt.inputs.in_file = infile
    _, nme = os.path.split(infile)
    outfile = os.path.join(os.getcwd(), nme)
    outfile = frt._gen_fname(outfile, suffix='_mcf')
    realcmd = 'mcflirt -in ' + infile + ' -out ' + outfile
    assert frt.cmdline == realcmd
    # Test specified outfile name
    outfile2 = '/newdata/bar.nii'
    frt.inputs.out_file = outfile2
    realcmd = 'mcflirt -in ' + infile + ' -out ' + outfile2
    assert frt.cmdline == realcmd
    opt_map = {
        'cost': ('-cost mutualinfo', 'mutualinfo'),
        'bins': ('-bins 256', 256),
        'dof': ('-dof 6', 6),
        'ref_vol': ('-refvol 2', 2),
        'scaling': ('-scaling 6.00', 6.00),
        'smooth': ('-smooth 1.00', 1.00),
        'rotation': ('-rotation 2', 2),
        'stages': ('-stages 3', 3),
        'init': ('-init %s' % (infile), infile),
        'use_gradient': ('-gdt', True),
        'use_contour': ('-edge', True),
        'mean_vol': ('-meanvol', True),
        'stats_imgs': ('-stats', True),
        'save_mats': ('-mats', True),
        'save_plots': ('-plots', True),
    }
    for name, settings in list(opt_map.items()):
        fnt = fsl.MCFLIRT(in_file=infile, **{name: settings[1]})
        instr = '-in %s' % (infile)
        outstr = '-out %s' % (outfile)
        # Some options are rendered before -out, the rest after it.
        if name in ('init', 'cost', 'dof', 'mean_vol', 'bins'):
            assert fnt.cmdline == ' '.join([fnt.cmd,
                                            instr,
                                            settings[0],
                                            outstr])
        else:
            assert fnt.cmdline == ' '.join([fnt.cmd,
                                            instr,
                                            outstr,
                                            settings[0]])
    # Test error is raised when missing required args
    fnt = fsl.MCFLIRT()
    with pytest.raises(ValueError):
        fnt.run()
# test fnirt
@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
def test_fnirt(setup_flirt):
    """Check FNIRT command-line generation for list-valued parameters,
    mandatory-input errors, file-valued options, and the intensity-map
    output pair reported by _list_outputs."""
    tmpdir, infile, reffile = setup_flirt
    os.chdir(tmpdir)
    fnirt = fsl.FNIRT()
    assert fnirt.cmd == 'fnirt'
    # Test list parameters
    params = [('subsampling_scheme', '--subsamp', [4, 2, 2, 1], '4,2,2,1'),
              ('max_nonlin_iter', '--miter', [4, 4, 4, 2], '4,4,4,2'),
              ('ref_fwhm', '--reffwhm', [4, 2, 2, 0], '4,2,2,0'),
              ('in_fwhm', '--infwhm', [4, 2, 2, 0], '4,2,2,0'),
              ('apply_refmask', '--applyrefmask', [0, 0, 1, 1], '0,0,1,1'),
              ('apply_inmask', '--applyinmask', [0, 0, 0, 1], '0,0,0,1'),
              ('regularization_lambda', '--lambda', [0.5, 0.75], '0.5,0.75')]
    for item, flag, val, strval in params:
        fnirt = fsl.FNIRT(in_file=infile,
                          ref_file=reffile,
                          **{item: val})
        log = fnirt._gen_fname(infile, suffix='_log.txt', change_ext=False)
        iout = fnirt._gen_fname(infile, suffix='_warped')
        # NOTE(review): ('max_nonlin_iter') below is a plain string, not a
        # tuple, so `in` does a substring test; it happens to match only the
        # intended item for these names — confirm intended.
        if item in ('max_nonlin_iter'):
            cmd = 'fnirt --in=%s '\
                  '--logout=%s'\
                  ' %s=%s --ref=%s'\
                  ' --iout=%s' % (infile, log,
                                  flag, strval, reffile, iout)
        elif item in ('in_fwhm'):
            cmd = 'fnirt --in=%s %s=%s --logout=%s '\
                  '--ref=%s --iout=%s' % (infile, flag,
                                          strval, log, reffile, iout)
        elif item.startswith('apply'):
            cmd = 'fnirt %s=%s '\
                  '--in=%s '\
                  '--logout=%s '\
                  '--ref=%s --iout=%s' % (flag, strval,
                                          infile, log,
                                          reffile,
                                          iout)
        else:
            cmd = 'fnirt '\
                  '--in=%s --logout=%s '\
                  '--ref=%s %s=%s --iout=%s' % (infile, log,
                                                reffile,
                                                flag, strval,
                                                iout)
        assert fnirt.cmdline == cmd
    # Test ValueError is raised when missing mandatory args
    fnirt = fsl.FNIRT()
    with pytest.raises(ValueError):
        fnirt.run()
    fnirt.inputs.in_file = infile
    fnirt.inputs.ref_file = reffile
    intmap_basename = '%s_intmap' % fsl.FNIRT.intensitymap_file_basename(infile)
    intmap_image = fsl_name(fnirt, intmap_basename)
    intmap_txt = '%s.txt' % intmap_basename
    # doing this to create the file to pass tests for file existence
    with open(intmap_image, 'w'):
        pass
    with open(intmap_txt, 'w'):
        pass
    # test files
    opt_map = [
        ('affine_file', '--aff=%s' % infile, infile),
        ('inwarp_file', '--inwarp=%s' % infile, infile),
        ('in_intensitymap_file', '--intin=%s' % intmap_basename, [intmap_image]),
        ('in_intensitymap_file',
         '--intin=%s' % intmap_basename,
         [intmap_image, intmap_txt]),
        ('config_file', '--config=%s' % infile, infile),
        ('refmask_file', '--refmask=%s' % infile, infile),
        ('inmask_file', '--inmask=%s' % infile, infile),
        ('field_file', '--fout=%s' % infile, infile),
        ('jacobian_file', '--jout=%s' % infile, infile),
        ('modulatedref_file', '--refout=%s' % infile, infile),
        ('out_intensitymap_file',
         '--intout=%s' % intmap_basename, True),
        ('out_intensitymap_file', '--intout=%s' % intmap_basename, intmap_image),
        ('fieldcoeff_file', '--cout=%s' % infile, infile),
        ('log_file', '--logout=%s' % infile, infile)]
    for (name, settings, arg) in opt_map:
        fnirt = fsl.FNIRT(in_file=infile,
                          ref_file=reffile,
                          **{name: arg})
        # Expected flag position in the command line depends on the option.
        if name in ('config_file', 'affine_file', 'field_file', 'fieldcoeff_file'):
            cmd = 'fnirt %s --in=%s '\
                  '--logout=%s '\
                  '--ref=%s --iout=%s' % (settings, infile, log,
                                          reffile, iout)
        elif name in ('refmask_file'):
            cmd = 'fnirt --in=%s '\
                  '--logout=%s --ref=%s '\
                  '%s '\
                  '--iout=%s' % (infile, log,
                                 reffile,
                                 settings,
                                 iout)
        elif name in ('in_intensitymap_file', 'inwarp_file', 'inmask_file', 'jacobian_file'):
            cmd = 'fnirt --in=%s '\
                  '%s '\
                  '--logout=%s --ref=%s '\
                  '--iout=%s' % (infile,
                                 settings,
                                 log,
                                 reffile,
                                 iout)
        elif name in ('log_file'):
            cmd = 'fnirt --in=%s '\
                  '%s --ref=%s '\
                  '--iout=%s' % (infile,
                                 settings,
                                 reffile,
                                 iout)
        else:
            cmd = 'fnirt --in=%s '\
                  '--logout=%s %s '\
                  '--ref=%s --iout=%s' % (infile, log,
                                          settings,
                                          reffile, iout)
        assert fnirt.cmdline == cmd
        if name == 'out_intensitymap_file':
            assert fnirt._list_outputs()['out_intensitymap_file'] == [
                intmap_image, intmap_txt]
@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
def test_applywarp(setup_flirt):
    """Check the command line built by fsl.ApplyWarp for each optional input.

    in_file, ref_file and field_file are mandatory; each loop iteration adds
    exactly one optional input and compares the resulting ``cmdline``.
    """
    tmpdir, infile, reffile = setup_flirt
    opt_map = {
        'out_file': ('--out=bar.nii', 'bar.nii'),
        'premat': ('--premat=%s' % (reffile), reffile),
        'postmat': ('--postmat=%s' % (reffile), reffile),
    }

    # in_file, ref_file, field_file mandatory
    for name, settings in list(opt_map.items()):
        awarp = fsl.ApplyWarp(in_file=infile,
                              ref_file=reffile,
                              field_file=reffile,
                              **{name: settings[1]})
        if name == 'out_file':
            # Explicit out_file: no auto-generated name in the command.
            realcmd = 'applywarp --in=%s '\
                      '--ref=%s --out=%s '\
                      '--warp=%s' % (infile, reffile,
                                     settings[1], reffile)
        else:
            # Otherwise the interface generates the output name itself.
            outfile = awarp._gen_fname(infile, suffix='_warp')
            realcmd = 'applywarp --in=%s '\
                      '--ref=%s --out=%s '\
                      '--warp=%s %s' % (infile, reffile,
                                        outfile, reffile,
                                        settings[0])
        assert awarp.cmdline == realcmd
@pytest.fixture()
def setup_fugue(tmpdir):
    """Write a dummy 80x80x80 NIfTI volume into *tmpdir*.

    Returns:
        tuple: ``(tmp_dir, infile)`` where ``infile`` is the path of the
        created ``dumbfile.nii.gz``.
    """
    import nibabel as nb
    import numpy as np
    import os.path as op

    tmp_dir = str(tmpdir)
    infile = op.join(tmp_dir, 'dumbfile.nii.gz')
    volume = np.ones((80, 80, 80))
    nb.Nifti1Image(volume, None, None).to_filename(infile)
    return (tmp_dir, infile)
@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
@pytest.mark.parametrize("attr, out_file", [
    ({"save_unmasked_fmap":True, "fmap_in_file":"infile", "mask_file":"infile", "output_type":"NIFTI_GZ"},
     'fmap_out_file'),
    ({"save_unmasked_shift":True, "fmap_in_file":"infile", "dwell_time":1.e-3, "mask_file":"infile", "output_type": "NIFTI_GZ"},
     "shift_out_file"),
    ({"in_file":"infile", "mask_file":"infile", "shift_in_file":"infile", "output_type":"NIFTI_GZ"},
     'unwarped_file')
    ])
def test_fugue(setup_fugue, attr, out_file):
    """Run FUGUE in each of its modes and check the expected output trait.

    The output must be defined after ``run()`` and its basename must follow
    the trait's ``name_template`` applied to the input file stem.
    """
    import os.path as op
    tmpdir, infile = setup_fugue

    fugue = fsl.FUGUE()
    # The literal "infile" in the parametrized dict is a placeholder for the
    # path created by the setup_fugue fixture.
    for key, value in attr.items():
        if value == "infile": setattr(fugue.inputs, key, infile)
        else: setattr(fugue.inputs, key, value)
    res = fugue.run()

    assert isdefined(getattr(res.outputs,out_file))
    trait_spec = fugue.inputs.trait(out_file)
    out_name = trait_spec.name_template % 'dumbfile'
    out_name += '.nii.gz'
    assert op.basename(getattr(res.outputs, out_file)) == out_name
@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
def test_first_genfname():
    """_gen_fname should embed the current segmentation method in the name."""
    first = fsl.FIRST()
    first.inputs.out_file = 'segment.nii'
    first.inputs.output_type = "NIFTI_GZ"

    def generated():
        return first._gen_fname(name='original_segmentations')

    # Default method.
    assert generated() == os.path.abspath('segment_all_fast_origsegs.nii.gz')

    first.inputs.method = 'none'
    assert generated() == os.path.abspath('segment_all_none_origsegs.nii.gz')

    # With specific structures listed, 'auto' is reported as 'none'.
    first.inputs.method = 'auto'
    first.inputs.list_of_specific_structures = ['L_Hipp', 'R_Hipp']
    assert generated() == os.path.abspath('segment_all_none_origsegs.nii.gz')
| 38.566327 | 132 | 0.544737 |
0645dd865c562e2caa3eb4f21891f7dba10b4a57 | 23,070 | py | Python | cupyx/jit/_compile.py | viantirreau/cupy | cafe9af0e974ff88fc6aa43bf106e343a60fb983 | [
"MIT"
] | 1 | 2021-06-03T16:51:02.000Z | 2021-06-03T16:51:02.000Z | cupyx/jit/_compile.py | viantirreau/cupy | cafe9af0e974ff88fc6aa43bf106e343a60fb983 | [
"MIT"
] | null | null | null | cupyx/jit/_compile.py | viantirreau/cupy | cafe9af0e974ff88fc6aa43bf106e343a60fb983 | [
"MIT"
] | null | null | null | import ast
import collections
import inspect
import numbers
import re
import sys
import warnings
import numpy
from cupyx.jit._codeblock import CodeBlock
from cupy.core import _kernel
from cupyx.jit import _types
from cupyx.jit import _typerules
# Python types whose instances may appear as compile-time scalar constants.
_typeclasses = (bool, numpy.bool_, numbers.Number)
# Transpilation result: the kernel name, the generated CUDA source, and the
# inferred return type (a _types.TypeBase).
Result = collections.namedtuple('Result', ['func_name', 'code', 'return_type'])
def transpile(func, attributes, mode, in_types, ret_type):
    """Transpile the target function.

    Args:
        func (function): Target function.
        attributes (list of str): Attributes of the generated CUDA function.
        mode ('numpy' or 'cuda'): The rule for typecast.
        in_types (list of _types.TypeBase): Types of the arguments.
        ret_type (_types.TypeBase or None): Type of the return value.

    Returns:
        Result: named tuple with the function name, CUDA source (preambles
        prepended) and the inferred return type.
    """
    if not callable(func):
        raise ValueError('`func` must be a callable object.')

    if func.__name__ == '<lambda>':
        raise NotImplementedError('Lambda function is not supported.')

    attributes = ' '.join(attributes)
    source = inspect.getsource(func)
    lines = source.split('\n')
    # Dedent the source so ast.parse accepts functions defined at any depth.
    num_indent = len(lines[0]) - len(lines[0].lstrip())
    source = '\n'.join([
        line.replace(' ' * num_indent, '', 1) for line in lines])

    # Names captured from the closure / globals become compile-time constants.
    cvars = inspect.getclosurevars(func)
    consts = dict(**cvars.globals, **cvars.nonlocals, **cvars.builtins)
    tree = ast.parse(source)
    assert isinstance(tree, ast.Module)
    assert len(tree.body) == 1
    cuda_code, env = _transpile_function(
        tree.body[0], attributes, mode, consts, in_types, ret_type)
    # Helper code collected during transpilation goes before the kernel.
    cuda_code = ''.join([code + '\n' for code in env.preambles]) + cuda_code
    return Result(
        func_name=func.__name__,
        code=cuda_code,
        return_type=env.ret_type,
    )
def _indent(lines, spaces=' '):
return [spaces + line for line in lines]
class CudaObject:
    """A fragment of emitted CUDA code paired with its CUDA type.

    Attributes:
        code (str): The CUDA expression text.
        ctype (_types.TypeBase): The type of the expression.
    """

    def __init__(self, code, ctype):
        self.code = code
        self.ctype = ctype

    @property
    def obj(self):
        # A CudaObject has no Python value: it only exists at kernel run
        # time, so asking for a compile-time constant is an error.
        raise ValueError('Constant value is requried: {}'.format(self.code))

    def __repr__(self):
        return '<CudaObject code = "{}", type = {}>'.format(
            self.code, self.ctype)
class Constant:
    """A value that is fully known at transpile time."""

    def __init__(self, obj):
        self._obj = obj

    @property
    def obj(self):
        """The wrapped Python value."""
        return self._obj

    def __repr__(self):
        return '<Constant obj = "{}">'.format(self.obj)
class Range:
    """Transpile-time representation of a ``range(...)`` iterator.

    ``start``, ``stop`` and ``step`` are CudaObjects of a common integer
    dtype; the range's own ``ctype`` is taken from ``stop``.
    """

    def __init__(self, start, stop, step, step_is_positive):
        self.start = start
        self.stop = stop
        self.step = step
        self.ctype = stop.ctype
        # True/False when the sign of ``step`` is known at transpile time,
        # otherwise None.
        self.step_is_positive = step_is_positive

        if self.ctype.dtype.kind not in 'iu':
            raise TypeError('range supports only for integer type.')
        for bound in (start, step):
            if self.ctype.dtype != bound.ctype.dtype:
                raise TypeError(
                    f'dtype mismatch: {self.ctype} != {bound.ctype}')
def is_constants(values):
    """Return True when every element of *values* is a ``Constant``."""
    for value in values:
        if not isinstance(value, Constant):
            return False
    return True
class Environment:
    """Transpile-time scope of a single target function.

    Attributes:
        mode ('numpy' or 'cuda'): The rule for typecast.
        consts (dict): Compile-time constants, keyed by variable name.
        params (dict): Function arguments (name -> CudaObject).
        locals (dict): Variables assigned inside the function body
            (name -> CudaObject).
        ret_type (_types.TypeBase): Return type of the function; stays
            ``None`` until it can be inferred during transpilation.
        preambles (set): Helper-code snippets emitted ahead of the kernel.
    """

    def __init__(self, mode, consts, params, ret_type):
        self.mode = mode
        self.consts = consts
        self.params = params
        self.locals = {}
        self.ret_type = ret_type
        self.preambles = set()

    def __getitem__(self, key):
        # Lookup order mirrors Python scoping: locals shadow parameters,
        # which shadow captured constants. Unknown names yield None.
        for namespace in (self.locals, self.params, self.consts):
            if key in namespace:
                return namespace[key]
        return None

    def __setitem__(self, key, value):
        self.locals[key] = value
def _transpile_function(
        func, attributes, mode, consts, in_types, ret_type):
    """Transpile the function.

    Args:
        func (ast.FunctionDef): Target function.
        attributes (str): The attributes of target function.
        mode ('numpy' or 'cuda'): The rule for typecast.
        consts (dict): The dictionary with keys as variable names and
            values as concrete data object.
        in_types (list of _types.TypeBase): The types of arguments.
        ret_type (_types.TypeBase): The type of return value.

    Returns:
        code (str): The generated CUDA code.
        env (Environment): More details of analysis result of the function,
            which includes preambles, estimated return type and more.
    """
    consts = dict([(k, Constant(v)) for k, v, in consts.items()])

    if not isinstance(func, ast.FunctionDef):
        # TODO(asi1024): Support for `ast.ClassDef`.
        raise NotImplementedError('Not supported: {}'.format(type(func)))
    if len(func.decorator_list) > 0:
        if sys.version_info >= (3, 9):
            # Code path for Python versions that support `ast.unparse`.
            for deco in func.decorator_list:
                deco_code = ast.unparse(deco)
                if deco_code not in ['rawkernel', 'vectorize']:
                    warnings.warn(
                        f'Decorator {deco_code} may not supported in JIT.',
                        RuntimeWarning)
    arguments = func.args
    # Only plain positional arguments are supported by the transpiler.
    if arguments.vararg is not None:
        raise NotImplementedError('`*args` is not supported currently.')
    if len(arguments.kwonlyargs) > 0:  # same length with `kw_defaults`.
        raise NotImplementedError(
            'keyword only arguments are not supported currently .')
    if arguments.kwarg is not None:
        raise NotImplementedError('`**kwargs` is not supported currently.')
    if len(arguments.defaults) > 0:
        raise NotImplementedError(
            'Default values are not supported currently.')

    args = [arg.arg for arg in arguments.args]
    if len(args) != len(in_types):
        raise TypeError(
            f'{func.name}() takes {len(args)} positional arguments '
            f'but {len(in_types)} were given.')
    params = dict([(x, CudaObject(x, t)) for x, t in zip(args, in_types)])
    env = Environment(mode, consts, params, ret_type)
    # Transpile the body first: this also populates env.locals/ret_type.
    body = _transpile_stmts(func.body, True, env)
    params = ', '.join([f'{env[a].ctype} {a}' for a in args])
    # Declarations for every local variable discovered during transpilation.
    local_vars = [f'{v.ctype} {n};' for n, v in env.locals.items()]

    if env.ret_type is None:
        # No return statement was seen, so the kernel returns void.
        env.ret_type = _types.Void()

    head = f'{attributes} {env.ret_type} {func.name}({params})'
    code = CodeBlock(head, local_vars + body)
    return str(code), env
def _eval_operand(op, args, env):
    """Evaluate an ast operator node applied to *args*.

    If every operand is a compile-time Constant the corresponding Python
    function is evaluated immediately; otherwise the operator is lowered to
    the matching ufunc call in CUDA code.
    """
    if is_constants(args):
        pyfunc = _typerules.get_pyfunc(type(op))
        return Constant(pyfunc(*[x.obj for x in args]))

    ufunc = _typerules.get_ufunc(env.mode, type(op))
    return _call_ufunc(ufunc, args, None, env)
def _call_ufunc(ufunc, args, dtype, env):
    """Emit CUDA code for a cupy ufunc applied to *args*.

    Args:
        ufunc (_kernel.ufunc): The ufunc to call.
        args (list): Operands (Constant or CudaObject).
        dtype: Requested output dtype, or None to infer from input types.
        env (Environment): The current transpile scope.

    Returns:
        CudaObject: The resulting expression and its type.
    """
    if len(args) != ufunc.nin:
        raise ValueError('invalid number of arguments')

    # Collect input dtypes; constants are typed by the current cast mode.
    in_types = []
    for x in args:
        if is_constants([x]):
            t = _typerules.get_ctype_from_scalar(env.mode, x.obj).dtype
        else:
            t = x.ctype.dtype
        in_types.append(t)

    # Pick the ufunc routine either by inference or by the requested dtype.
    if dtype is None:
        op = ufunc._ops._guess_routine_from_in_types(tuple(in_types))
    else:
        op = ufunc._ops._guess_routine_from_dtype(dtype)

    if op is None:
        raise TypeError(
            f'"{ufunc.name}" does not support for the input types: {in_types}')
    if op.error_func is not None:
        op.error_func()

    # Only single-output routines of the form 'out0 = <expr>' are supported.
    if ufunc.nout == 1 and op.routine.startswith('out0 = '):
        out_type = _types.Scalar(op.out_types[0])
        expr = op.routine.replace('out0 = ', '')

        # Cast every argument to the routine's input type.
        in_params = []
        for x, t in zip(args, op.in_types):
            x = _astype_scalar(x, _types.Scalar(t), 'same_kind', env)
            x = _to_cuda_object(x, env)
            in_params.append(x)

        # If an operand appears more than once in the routine, inlining it
        # would duplicate (and re-evaluate) the argument expression.
        can_use_inline_expansion = True
        for i in range(ufunc.nin):
            if len(list(re.finditer(r'in{}'.format(i), op.routine))) > 1:
                can_use_inline_expansion = False

        if can_use_inline_expansion:
            # Code pass for readable generated code
            for i, x in enumerate(in_params):
                expr = expr.replace(f'in{i}', x.code)
            expr = '(' + expr.replace('out0_type', str(out_type)) + ')'
            env.preambles.add(ufunc._preamble)
        else:
            # Emit a templated __device__ helper so each argument is
            # evaluated exactly once, and call it instead.
            template_typenames = ', '.join([
                f'typename T{i}' for i in range(ufunc.nin)])
            ufunc_name = f'{ufunc.name}_{str(numpy.dtype(op.out_types[0]))}'
            params = ', '.join([f'T{i} in{i}' for i in range(ufunc.nin)])
            ufunc_code = f"""template <{template_typenames}>
__device__ {out_type} {ufunc_name}({params}) {{
    return {expr};
}}
"""
            env.preambles.add(ufunc_code)
            in_params = ', '.join([a.code for a in in_params])
            expr = f'{ufunc_name}({in_params})'
        return CudaObject(expr, out_type)

    raise NotImplementedError(f'ufunc `{ufunc.name}` is not supported.')
def _transpile_stmts(stmts, is_toplevel, env):
    """Transpile a statement list, concatenating the emitted code lines."""
    return [
        emitted
        for stmt in stmts
        for emitted in _transpile_stmt(stmt, is_toplevel, env)
    ]
def _transpile_stmt(stmt, is_toplevel, env):
    """Transpile a single Python statement into CUDA code.

    Args:
        stmt (ast.stmt): The statement node to transpile.
        is_toplevel (bool): True while emitting the function body itself;
            compile-time constant assignment is only allowed there.
        env (Environment): The scope of the function being transpiled.

    Returns (list of [CodeBlock or str]): The generated CUDA code.
    """
    if isinstance(stmt, ast.ClassDef):
        raise NotImplementedError('class is not supported currently.')
    if isinstance(stmt, (ast.FunctionDef, ast.AsyncFunctionDef)):
        raise NotImplementedError(
            'Nested functions are not supported currently.')
    if isinstance(stmt, ast.Return):
        value = _transpile_expr(stmt.value, env)
        value = _to_cuda_object(value, env)
        t = value.ctype
        # All return statements must agree on a single return type.
        if env.ret_type is None:
            env.ret_type = t
        elif env.ret_type != t:
            raise ValueError(
                f'Failed to infer the return type: {env.ret_type} or {t}')
        return [f'return {value.code};']
    if isinstance(stmt, ast.Delete):
        raise NotImplementedError('`del` is not supported currently.')
    if isinstance(stmt, ast.Assign):
        if len(stmt.targets) != 1:
            raise NotImplementedError('Not implemented.')
        value = _transpile_expr(stmt.value, env)
        target = stmt.targets[0]
        if not isinstance(target, ast.Name):
            # e.g. subscript assignment: emit the store directly.
            target = _transpile_expr(target, env)
            return [f'{target.code} = {value.code};']
        name = target.id
        if is_constants([value]):
            if not isinstance(value.obj, _typeclasses):
                # Non-scalar constants live only in the compile-time scope.
                if is_toplevel:
                    if env[name] is not None and not is_constants([env[name]]):
                        raise TypeError(f'Type mismatch of variable: `{name}`')
                    env.consts[name] = value
                    return []
                else:
                    raise TypeError(
                        'Cannot assign constant value not at top-level.')
        value = _to_cuda_object(value, env)
        if env[name] is None:
            # First assignment declares the local variable.
            env[name] = CudaObject(name, value.ctype)
        elif is_constants([env[name]]):
            # Fix: message previously lacked the f prefix and printed the
            # literal text "{name}".
            raise TypeError(f'Type mismatch of variable: `{name}`')
        elif env[name].ctype.dtype != value.ctype.dtype:
            raise TypeError(
                f'Data type mismatch of variable: `{name}`: '
                f'{env[name].ctype.dtype} != {value.ctype.dtype}')
        return [f'{name} = {value.code};']
    if isinstance(stmt, ast.AugAssign):
        value = _transpile_expr(stmt.value, env)
        target = _transpile_expr(stmt.target, env)
        assert isinstance(target, CudaObject)
        value = _to_cuda_object(value, env)
        result = _eval_operand(stmt.op, (target, value), env)
        if not numpy.can_cast(
                result.ctype.dtype, target.ctype.dtype, 'same_kind'):
            raise TypeError('dtype mismatch')
        return [f'{target.code} = {result.code};']
    if isinstance(stmt, ast.For):
        if len(stmt.orelse) > 0:
            # Fix: the message previously said "while-else".
            raise NotImplementedError('for-else is not supported.')
        name = stmt.target.id
        iters = _transpile_expr(stmt.iter, env)
        if env[name] is None:
            env[name] = CudaObject(stmt.target.id, iters.ctype)
        elif env[name].ctype.dtype != iters.ctype.dtype:
            raise TypeError(
                f'Data type mismatch of variable: `{name}`: '
                f'{env[name].ctype.dtype} != {iters.ctype.dtype}')
        body = _transpile_stmts(stmt.body, False, env)
        if not isinstance(iters, Range):
            raise NotImplementedError(
                'for-loop is supported only for range iterator.')
        init_code = (f'{iters.ctype} '
                     f'__it = {iters.start.code}, '
                     f'__stop = {iters.stop.code}, '
                     f'__step = {iters.step.code}')
        # Use a runtime sign check only when the step sign is unknown.
        cond = '__step >= 0 ? __it < __stop : __it > __stop'
        if iters.step_is_positive is True:
            cond = '__it < __stop'
        elif iters.step_is_positive is False:
            cond = '__it > __stop'
        head = f'for ({init_code}; {cond}; __it += __step)'
        return [CodeBlock(head, [f'{name} = __it;'] + body)]
    if isinstance(stmt, ast.AsyncFor):
        raise ValueError('`async for` is not allowed.')
    if isinstance(stmt, ast.While):
        if len(stmt.orelse) > 0:
            raise NotImplementedError('while-else is not supported.')
        condition = _transpile_expr(stmt.test, env)
        condition = _astype_scalar(condition, _types.bool_, 'unsafe', env)
        condition = _to_cuda_object(condition, env)
        body = _transpile_stmts(stmt.body, False, env)
        head = f'while ({condition.code})'
        return [CodeBlock(head, body)]
    if isinstance(stmt, ast.If):
        condition = _transpile_expr(stmt.test, env)
        if is_constants([condition]):
            # Constant condition: resolve the branch at transpile time.
            stmts = stmt.body if condition.obj else stmt.orelse
            return _transpile_stmts(stmts, is_toplevel, env)
        head = f'if ({condition.code})'
        then_body = _transpile_stmts(stmt.body, False, env)
        else_body = _transpile_stmts(stmt.orelse, False, env)
        return [CodeBlock(head, then_body), CodeBlock('else', else_body)]
    if isinstance(stmt, (ast.With, ast.AsyncWith)):
        raise ValueError('Switching contexts are not allowed.')
    if isinstance(stmt, (ast.Raise, ast.Try)):
        raise ValueError('throw/catch are not allowed.')
    if isinstance(stmt, ast.Assert):
        value = _transpile_expr(stmt.test, env)
        if is_constants([value]):
            assert value.obj
            return [';']
        else:
            # Fix: concatenating the CudaObject itself raised TypeError;
            # the generated code is its `.code` attribute.
            return ['assert(' + value.code + ');']
    if isinstance(stmt, (ast.Import, ast.ImportFrom)):
        raise ValueError('Cannot import modules from the target functions.')
    if isinstance(stmt, (ast.Global, ast.Nonlocal)):
        raise ValueError('Cannot use global/nonlocal in the target functions.')
    if isinstance(stmt, ast.Expr):
        value = _transpile_expr(stmt.value, env)
        # Fix: same str + CudaObject concatenation bug as in ast.Assert.
        return [';'] if is_constants([value]) else [value.code + ';']
    if isinstance(stmt, ast.Pass):
        return [';']
    if isinstance(stmt, ast.Break):
        raise NotImplementedError('Not implemented.')
    if isinstance(stmt, ast.Continue):
        raise NotImplementedError('Not implemented.')
    assert False
def _transpile_expr(expr, env):
    """Transpile an expression node.

    Returns (CudaObject): The CUDA code and its type of the expression.
    A Constant that wraps a CudaObject (e.g. looked up from the scope) is
    unwrapped before being returned.
    """
    result = _transpile_expr_internal(expr, env)
    needs_unwrap = (
        isinstance(result, Constant) and isinstance(result.obj, CudaObject))
    return result.obj if needs_unwrap else result
def _transpile_expr_internal(expr, env):
    """Transpile one expression node.

    Returns a CudaObject (runtime expression), a Constant (compile-time
    value) or a Range (for-loop iterator).
    """
    if isinstance(expr, ast.BoolOp):
        # Fold `a and b and c` left to right through the operator.
        values = [_transpile_expr(e, env) for e in expr.values]
        value = values[0]
        for rhs in values[1:]:
            value = _eval_operand(expr.op, (value, rhs), env)
        return value
    if isinstance(expr, ast.BinOp):
        left = _transpile_expr(expr.left, env)
        right = _transpile_expr(expr.right, env)
        return _eval_operand(expr.op, (left, right), env)
    if isinstance(expr, ast.UnaryOp):
        value = _transpile_expr(expr.operand, env)
        return _eval_operand(expr.op, (value,), env)
    if isinstance(expr, ast.Lambda):
        raise NotImplementedError('Not implemented.')
    if isinstance(expr, ast.Compare):
        values = [expr.left] + expr.comparators
        if len(values) != 2:
            raise NotImplementedError(
                'Comparison of 3 or more values is not implemented.')
        values = [_transpile_expr(e, env) for e in values]
        return _eval_operand(expr.ops[0], values, env)
    if isinstance(expr, ast.IfExp):
        cond = _transpile_expr(expr.test, env)
        x = _transpile_expr(expr.body, env)
        y = _transpile_expr(expr.orelse, env)
        if is_constants([cond]):
            # Fold the branch at transpile time. Fix: the previous test
            # `isinstance(expr, Constant)` could never be true (`expr` is
            # an ast node), so a constant condition fell through and
            # crashed on `cond.ctype` below.
            return x if cond.obj else y
        if cond.ctype.dtype.kind == 'c':
            raise NotImplementedError('')
        x = _to_cuda_object(x, env)
        y = _to_cuda_object(y, env)
        if x.ctype.dtype != y.ctype.dtype:
            raise TypeError(
                'Type mismatch in conditional expression.: '
                f'{x.ctype.dtype} != {y.ctype.dtype}')
        cond = _astype_scalar(cond, _types.Scalar(numpy.bool_), 'unsafe', env)
        return CudaObject(f'({cond.code} ? {x.code} : {y.code})', x.ctype)
    if isinstance(expr, ast.Call):
        func = _transpile_expr(expr.func, env).obj
        args = [_transpile_expr(x, env) for x in expr.args]
        kwargs = dict([(kw.arg, _transpile_expr(kw.value, env))
                       for kw in expr.keywords])

        if func is range:
            # range(...) becomes a transpile-time Range object that the
            # for-statement handler lowers to a C for-loop.
            if len(args) == 0:
                raise TypeError('range expected at least 1 argument, got 0')
            elif len(args) == 1:
                start, stop, step = Constant(0), args[0], Constant(1)
            elif len(args) == 2:
                start, stop, step = args[0], args[1], Constant(1)
            elif len(args) == 3:
                start, stop, step = args
            else:
                raise TypeError(
                    f'range expected at most 3 argument, got {len(args)}')
            step_is_positive = step.obj >= 0 if is_constants([step]) else None
            start = _to_cuda_object(start, env)
            stop = _to_cuda_object(stop, env)
            step = _to_cuda_object(step, env)
            return Range(start, stop, step, step_is_positive)
        if is_constants(args) and is_constants(kwargs.values()):
            # compile-time function call
            args = [x.obj for x in args]
            kwargs = dict([(k, v.obj) for k, v in kwargs.items()])
            return Constant(func(*args, **kwargs))
        if isinstance(func, _kernel.ufunc):
            # ufunc call; only the `dtype` keyword is understood.
            dtype = kwargs.pop('dtype', Constant(None)).obj
            if len(kwargs) > 0:
                name = next(iter(kwargs))
                raise TypeError(
                    f"'{name}' is an invalid keyword to ufunc {func.name}")
            return _call_ufunc(func, args, dtype, env)
        if inspect.isclass(func) and issubclass(func, _typeclasses):
            # explicit typecast
            if len(args) != 1:
                raise TypeError(
                    f'function takes {func} invalid number of argument')
            return _astype_scalar(args[0], _types.Scalar(func), 'unsafe', env)
        raise NotImplementedError(
            f'function call of `{func.__name__}` is not implemented')
    if isinstance(expr, ast.Constant):
        return Constant(expr.value)
    if isinstance(expr, ast.Num):
        # Deprecated since py3.8
        return Constant(expr.n)
    if isinstance(expr, ast.Str):
        # Deprecated since py3.8
        return Constant(expr.s)
    if isinstance(expr, ast.Subscript):
        value = _transpile_expr(expr.value, env)
        index = _transpile_expr(expr.slice, env)
        if is_constants([value, index]):
            # Fix: subscript the wrapped Python values; `value[index]`
            # tried to index the Constant wrapper itself.
            return Constant(value.obj[index.obj])
        value = _to_cuda_object(value, env)
        index = _to_cuda_object(index, env)
        if not isinstance(value.ctype, _types.Array):
            raise ValueError(f'{value.code} must be Array type.')
        if value.ctype.ndim != 1:
            raise NotImplementedError('Not implemented for ndim > 1.')
        return CudaObject(
            f'{value.code}[{index.code}]',
            _types.Scalar(value.ctype.dtype))
    if isinstance(expr, ast.Name):
        value = env[expr.id]
        if value is None:
            raise NameError(
                f'Unbound name: {expr.id} in line {expr.lineno}')
        return env[expr.id]
    if isinstance(expr, ast.Attribute):
        value = _transpile_expr(expr.value, env)
        if is_constants([value]):
            return Constant(getattr(value.obj, expr.attr))
        raise NotImplementedError('Not implemented: __getattr__')
    if isinstance(expr, ast.Index):
        # Removed in py3.9; older ASTs wrap subscript indices in ast.Index.
        return _transpile_expr(expr.value, env)
    raise ValueError('Not supported: type {}'.format(type(expr)))
def _astype_scalar(x, ctype, casting, env):
    """Cast the scalar *x* to *ctype* under the given numpy casting rule.

    Constants are converted eagerly in Python; CudaObjects get a C-style
    cast emitted around their code.
    """
    if is_constants([x]):
        return Constant(ctype.dtype.type(x.obj))

    from_t = x.ctype.dtype
    to_t = ctype.dtype
    if from_t == to_t:
        return x

    # Uses casting rules for scalar values.
    if not numpy.can_cast(from_t.type(0), to_t.type(0), casting):
        raise TypeError(
            f"Cannot cast from '{from_t}' to {to_t} "
            f"with casting rule {casting}.")

    if from_t.kind == 'c' and to_t.kind != 'c':
        # complex -> real: take the real part (silently for bool targets,
        # with a ComplexWarning otherwise, mirroring numpy's behavior).
        if to_t.kind != 'b':
            warnings.warn(
                'Casting complex values to real discards the imaginary part',
                numpy.ComplexWarning)
        return CudaObject(f'({ctype})({x.code}.real())', ctype)
    return CudaObject(f'({ctype})({x.code})', ctype)
def _to_cuda_object(x, env):
    """Materialize *x* as a CudaObject.

    Constants are rendered as CUDA literals of the dtype chosen by the
    current cast mode; CudaObjects pass through unchanged. Range objects
    have no expression form and are rejected.
    """
    if isinstance(x, CudaObject):
        return x
    if isinstance(x, Constant):
        ctype = _typerules.get_ctype_from_scalar(env.mode, x.obj)
        code = _types.get_cuda_code_from_constant(x.obj, ctype)
        return CudaObject(code, ctype)
    if isinstance(x, Range):
        raise TypeError('range object cannot be interpreted as a cuda object.')
    assert False
| 37.512195 | 79 | 0.604768 |
fecf46082ead19df64643f2afc87c22ab20bd816 | 3,069 | py | Python | tests/resonances/data/test_astdys.py | apetrov/resonances | 50be33536965e6a78371282a2d1803c53f11d112 | [
"MIT"
] | 4 | 2015-11-04T11:23:00.000Z | 2021-08-04T20:27:42.000Z | tests/resonances/data/test_astdys.py | apetrov/resonances | 50be33536965e6a78371282a2d1803c53f11d112 | [
"MIT"
] | 1 | 2021-08-04T20:57:22.000Z | 2021-08-07T09:17:14.000Z | tests/resonances/data/test_astdys.py | apetrov/resonances | 50be33536965e6a78371282a2d1803c53f11d112 | [
"MIT"
] | 1 | 2021-08-04T20:49:16.000Z | 2021-08-04T20:49:16.000Z | import resonances
from resonances.data.astdys import astdys
import pytest
import numpy as np
import pandas as pd
from pathlib import Path
import shutil
@pytest.fixture(autouse=True)
def run_around_tests():
    """Point astdys at the small test catalog and give every test a scratch
    ``cache/tests`` directory, removed again on teardown.
    """
    resonances.config.set('catalog', 'cache/tests/small.csv')
    resonances.config.set('astdys.catalog', 'tests/fixtures/small.cat')
    Path('cache/tests').mkdir(parents=True, exist_ok=True)
    yield
    # Teardown: drop everything the test wrote to the scratch directory.
    shutil.rmtree('cache/tests')
def test_required_config_values():
    """Every config key the astdys module relies on must be defined."""
    required_keys = (
        'catalog',
        'catalog.date',
        'astdys.catalog.url',
        'astdys.catalog',
        'astdys.date',
    )
    for key in required_keys:
        assert resonances.config.has(key) is True
def test_transform_astdys_catalog():
    """The raw AstDyS catalog is reshaped into named orbital-element columns,
    with the angles stored in radians."""
    cat = astdys.transform_astdys_catalog()
    assert 'a' in cat
    assert 'e' in cat
    assert 'inc' in cat
    assert 'omega' in cat
    assert 'Omega' in cat
    assert 'M' in cat
    assert 10 == len(cat)
    # First row of the fixture: semi-major axis and eccentricity.
    assert 2.766 == pytest.approx(cat['a'].iloc[0], 0.01)
    assert 0.07816 == pytest.approx(cat['e'].iloc[0], 0.01)
    # Sixth row: full element set; angle columns converted back to degrees
    # for comparison with the fixture values.
    assert '6' == cat['num'].iloc[5]
    assert 2.42456 == pytest.approx(cat['a'].iloc[5], 0.01)
    assert 0.20328 == pytest.approx(cat['e'].iloc[5], 0.01)
    assert 14.73973 == pytest.approx(cat['inc'].iloc[5] / np.pi * 180, 0.01)
    assert 138.64293 == pytest.approx(cat['Omega'].iloc[5] / np.pi * 180, 0.01)
    assert 239.70765 == pytest.approx(cat['omega'].iloc[5] / np.pi * 180, 0.01)
    assert 242.94481 == pytest.approx(cat['M'].iloc[5] / np.pi * 180, 0.01)
def test_build():
    """build() writes the CSV catalog configured under the 'catalog' key."""
    astdys.build()
    assert Path(resonances.config.get('catalog')).is_file() is True

    # NOTE(review): this reads 'tests/fixtures/small.csv' rather than the
    # freshly built 'cache/tests/small.csv' — presumably a pre-generated
    # reference copy; confirm the fixture file is kept in sync with build().
    cat = pd.read_csv('tests/fixtures/small.csv')
    assert 10 == len(cat)
    assert 2.766 == pytest.approx(cat['a'].iloc[0], 0.01)
    assert 0.07816 == pytest.approx(cat['e'].iloc[0], 0.01)
    assert 6 == cat['num'].iloc[5]
    assert 2.42456 == pytest.approx(cat['a'].iloc[5], 0.01)
def test_load():
    """load() must (re)populate the module-level catalog cache."""
    astdys.catalog = None  # start from an empty cache
    assert astdys.catalog is None

    astdys.load()

    assert astdys.catalog is not None
def test_search():
    """search() returns the orbital elements (angles in radians) for a
    numbered asteroid, or None when it is absent from the catalog."""
    resonances.config.set('catalog', 'tests/fixtures/small.csv')
    obj = astdys.search(6)
    assert 2.42456 == pytest.approx(obj['a'], 0.01)
    assert 0.20328 == pytest.approx(obj['e'], 0.01)
    # Angles are stored in radians; convert back to degrees to compare.
    assert 14.73973 == pytest.approx(obj['inc'] / np.pi * 180, 0.01)
    assert 138.64293 == pytest.approx(obj['Omega'] / np.pi * 180, 0.01)
    assert 239.70765 == pytest.approx(obj['omega'] / np.pi * 180, 0.01)
    assert 242.94481 == pytest.approx(obj['M'] / np.pi * 180, 0.01)

    # Last asteroid present in the 10-row fixture.
    obj = astdys.search(10)
    assert obj is not None

    # Beyond the fixture: lookups must fail gracefully, not raise.
    obj = astdys.search(11)
    assert obj is None
    obj = astdys.search(123456789)
    assert obj is None
def test_search_possible_resonant_asteroids():
    """Asteroids near the 4J-2S-1 three-body resonance should be found in
    the fixture catalog."""
    mmr = resonances.ThreeBody('4J-2S-1')
    df = astdys.search_possible_resonant_asteroids(mmr)
    asteroids = df['num'].tolist()
    # Expected members are fixed by tests/fixtures/small.cat.
    assert '7' in asteroids
    assert '9' in asteroids
| 33 | 79 | 0.661127 |
32cf1b041a4829f5e6884901881939c9c13f0d21 | 1,677 | py | Python | code/style_gan/StyleGAN_PyTorch/torchvision_sunner/transforms/base.py | YuTao0310/deepLearning | cc56ad418881d78d7c42b2fe66aa9542d5df78b2 | [
"MIT"
] | 89 | 2019-12-16T09:19:08.000Z | 2022-02-27T16:52:07.000Z | code/style_gan/StyleGAN_PyTorch/torchvision_sunner/transforms/base.py | YuTao0310/deepLearning | cc56ad418881d78d7c42b2fe66aa9542d5df78b2 | [
"MIT"
] | 6 | 2019-12-19T11:20:25.000Z | 2021-06-09T03:13:41.000Z | code/style_gan/StyleGAN_PyTorch/torchvision_sunner/transforms/base.py | YuTao0310/deepLearning | cc56ad418881d78d7c42b2fe66aa9542d5df78b2 | [
"MIT"
] | 15 | 2019-11-20T10:54:13.000Z | 2021-03-31T05:38:40.000Z | import numpy as np
import torch
"""
This class define the parent class of operation
Author: SunnerLi
"""
class OP():
"""
The parent class of each operation
The goal of this class is to adapting with different input format
"""
def work(self, tensor):
"""
The virtual function to define the process in child class
Arg: tensor - The np.ndarray object. The tensor you want to deal with
"""
raise NotImplementedError("You should define your own function in the class!")
def __call__(self, tensor):
"""
This function define the proceeding of the operation
There are different choice toward the tensor parameter
1. torch.Tensor and rank is CHW
2. np.ndarray and rank is CHW
3. torch.Tensor and rank is TCHW
4. np.ndarray and rank is TCHW
Arg: tensor - The tensor you want to operate
Ret: The operated tensor
"""
isTensor = type(tensor) == torch.Tensor
if isTensor:
tensor_type = tensor.type()
tensor = tensor.cpu().data.numpy()
if len(tensor.shape) == 3:
tensor = self.work(tensor)
elif len(tensor.shape) == 4:
tensor = np.asarray([self.work(_) for _ in tensor])
else:
raise Exception("We dont support the rank format {}".format(tensor.shape),
"If the rank of the tensor shape is only 2, you can call 'GrayStack()'")
if isTensor:
tensor = torch.from_numpy(tensor)
tensor = tensor.type(tensor_type)
return tensor | 34.22449 | 88 | 0.582588 |
0d775f25f23bb9e57f5ef06970e3c34b8b04255d | 53,412 | py | Python | custom/icds_reports/tests/agg_tests/test_fact_sheet_report.py | roboton/commcare-hq | 3ccbe59508d98dd1963ca87cf249dd2df8af8ecc | [
"BSD-3-Clause"
] | null | null | null | custom/icds_reports/tests/agg_tests/test_fact_sheet_report.py | roboton/commcare-hq | 3ccbe59508d98dd1963ca87cf249dd2df8af8ecc | [
"BSD-3-Clause"
] | 1 | 2021-06-02T04:45:16.000Z | 2021-06-02T04:45:16.000Z | custom/icds_reports/tests/agg_tests/test_fact_sheet_report.py | roboton/commcare-hq | 3ccbe59508d98dd1963ca87cf249dd2df8af8ecc | [
"BSD-3-Clause"
] | null | null | null | from datetime import datetime
from django.test.testcases import TestCase
from custom.icds_reports.const import AADHAR_SEEDED_BENEFICIARIES
from custom.icds_reports.views import FactSheetsReport
from custom.icds_reports.utils import get_location_level
class TestFactSheetReportMaternalAndChildNutritionICDS(TestCase):
maxDiff = None
def get_data(self):
    """Build the fact-sheet payload for the maternal & child nutrition
    category, aggregated at state level for June 2017."""
    config = {
        'aggregation_level': 1,
        'month': datetime(2017, 6, 1).date(),
        'previous_month': datetime(2017, 5, 1).date(),
        'two_before': datetime(2017, 4, 1).date(),
        'category': 'maternal_and_child_nutrition',
        'domain': 'icds-cas',
        'sql_location': None
    }
    loc_level = get_location_level(config.get('aggregation_level'))
    return FactSheetsReport(config=config, loc_level=loc_level).get_data()
def test_section_amount(self):
    """Exactly one section is expected for this category."""
    sections = self.get_data()['config']['sections']
    self.assertEqual(len(sections), 1)
def test_nutrition_status_of_children_amount_of_config_rows(self):
    """The single section carries thirteen configured rows."""
    rows_config = self.get_data()['config']['sections'][0]['rows_config']
    self.assertEqual(len(rows_config), 13)
def test_status_weighed(self):
    """Row 0: weighing efficiency (share of children <5 who were weighed)."""
    self.assertDictEqual(
        self.get_data()['config']['sections'][0]['rows_config'][0],
        {
            'average': {
                'html': 70.27300303336703,
                'sort_key': 70.27300303336703
            },
            'data': [
                {'html': 'Weighing Efficiency (Children <5 weighed)'},
                {'html': 67.58080313418218, 'sort_key': 67.58080313418218},
                {'html': 70.27300303336703, 'sort_key': 70.27300303336703},
                {'html': 0}
            ],
            'data_source': 'AggChildHealthMonthlyDataSource',
            'format': 'percent',
            'header': 'Weighing Efficiency (Children <5 weighed)',
            'slug': 'status_weighed'
        }
    )
def test_status_height_efficiency(self):
    """Row 1: height measurement efficiency for children under five."""
    self.assertDictEqual(
        self.get_data()['config']['sections'][0]['rows_config'][1],
        {
            'average': {
                'html': 3.235591506572295,
                'sort_key': 3.235591506572295
            },
            'data': [
                {'html': 'Height measurement efficiency (Children <5 measured)'},
                {'html': 1.1753183153770812, 'sort_key': 1.1753183153770812},
                {'html': 3.235591506572295, 'sort_key': 3.235591506572295},
                {'html': 0}
            ],
            'data_source': 'AggChildHealthMonthlyDataSource',
            'format': 'percent',
            'header': 'Height measurement efficiency (Children <5 measured)',
            'slug': 'status_height_efficiency'
        }
    )
def test_nutrition_status_unweighed(self):
    """Row 2: absolute count of unweighed children (higher is worse, hence
    reverseColors)."""
    self.assertDictEqual(
        self.get_data()['config']['sections'][0]['rows_config'][2],
        {
            'data': [
                {'html': 'Total number of unweighed children (0-5 Years)'},
                {'html': 331, 'sort_key': 331},
                {'html': 294, 'sort_key': 294},
                {'html': 0}],
            'data_source': 'AggChildHealthMonthlyDataSource',
            'header': 'Total number of unweighed children (0-5 Years)',
            'reverseColors': True,
            'slug': 'nutrition_status_unweighed'
        }
    )
def test_severely_underweight(self):
    """Row 3: percentage of severely underweight children (weight-for-age)."""
    self.assertDictEqual(
        self.get_data()['config']['sections'][0]['rows_config'][3],
        {
            'average': {
                'html': 2.8776978417266186,
                'sort_key': 2.8776978417266186
            },
            'data': [
                {'html': 'Children from 0 - 5 years who are severely underweight (weight-for-age)'},
                {'html': 2.1739130434782608, 'sort_key': 2.1739130434782608},
                {'html': 2.8776978417266186, 'sort_key': 2.8776978417266186},
                {'html': 0}
            ],
            'data_source': 'AggChildHealthMonthlyDataSource',
            'format': 'percent',
            'header': 'Children from 0 - 5 years who are severely underweight (weight-for-age)',
            'reverseColors': True,
            'slug': 'severely_underweight'
        }
    )
def test_moderately_underweight(self):
    """Row 4: percentage of moderately underweight children (weight-for-age)."""
    self.assertDictEqual(
        self.get_data()['config']['sections'][0]['rows_config'][4],
        {
            'average': {
                'html': 18.56115107913669,
                'sort_key': 18.56115107913669
            },
            'data': [
                {'html': 'Children from 0-5 years who are moderately underweight (weight-for-age)'},
                {'html': 23.043478260869566, 'sort_key': 23.043478260869566},
                {'html': 18.56115107913669, 'sort_key': 18.56115107913669},
                {'html': 0}
            ],
            'data_source': 'AggChildHealthMonthlyDataSource',
            'format': 'percent',
            'header': 'Children from 0-5 years who are moderately underweight (weight-for-age)',
            'reverseColors': True,
            'slug': 'moderately_underweight'
        }
    )
def test_status_normal(self):
self.assertDictEqual(
self.get_data()['config']['sections'][0]['rows_config'][5],
{
'average': {
'html': 78.56115107913669,
'sort_key': 78.56115107913669
},
'data': [
{'html': 'Children from 0-5 years who are at normal weight-for-age'},
{'html': 74.78260869565217, 'sort_key': 74.78260869565217},
{'html': 78.56115107913669, 'sort_key': 78.56115107913669},
{'html': 0}
],
'data_source': 'AggChildHealthMonthlyDataSource',
'format': 'percent',
'header': 'Children from 0-5 years who are at normal weight-for-age',
'slug': 'status_normal'
}
)
def test_wasting_severe(self):
self.assertDictEqual(
self.get_data()['config']['sections'][0]['rows_config'][6],
{
'average': {
'html': 0.0,
'sort_key': 0.0
},
'data': [
{'html': 'Children from 0 - 5 years with severe acute malnutrition (weight-for-height)'},
{'html': 16.666666666666668, 'sort_key': 16.666666666666668},
{'html': 0.0, 'sort_key': 0.0},
{'html': 0}
],
'data_source': 'AggChildHealthMonthlyDataSource',
'format': 'percent',
'header': 'Children from 0 - 5 years with severe acute malnutrition (weight-for-height)',
'reverseColors': True,
'slug': 'wasting_severe'
}
)
def test_wasting_moderate(self):
self.assertDictEqual(
self.get_data()['config']['sections'][0]['rows_config'][7],
{
'average': {
'html': 25.806451612903224,
'sort_key': 25.806451612903224
},
'data': [
{'html': 'Children from 0 - 5 years with moderate acute malnutrition (weight-for-height)'},
{'html': 8.333333333333334, 'sort_key': 8.333333333333334},
{'html': 25.806451612903224, 'sort_key': 25.806451612903224},
{'html': 0}
],
'data_source': 'AggChildHealthMonthlyDataSource',
'format': 'percent',
'header': 'Children from 0 - 5 years with moderate acute malnutrition (weight-for-height)',
'reverseColors': True,
'slug': 'wasting_moderate'
}
)
def test_wasting_normal(self):
self.assertDictEqual(
self.get_data()['config']['sections'][0]['rows_config'][8],
{
'average': {
'html': 61.29032258064516,
'sort_key': 61.29032258064516
},
'data': [
{'html': 'Children from 0 - 5 years with normal weight-for-height'},
{'html': 50.0, 'sort_key': 50.0},
{'html': 61.29032258064516, 'sort_key': 61.29032258064516},
{'html': 0}
],
'data_source': 'AggChildHealthMonthlyDataSource',
'format': 'percent',
'header': 'Children from 0 - 5 years with normal weight-for-height',
'slug': 'wasting_normal'
}
)
def test_stunting_severe(self):
self.assertDictEqual(
self.get_data()['config']['sections'][0]['rows_config'][9],
{
'average': {
'html': 34.375,
'sort_key': 34.375,
},
'data': [
{'html': 'Children from 0 - 5 years with severe stunting (height-for-age)'},
{'html': 41.666666666666664, 'sort_key': 41.666666666666664},
{'html': 34.375, 'sort_key': 34.375},
{'html': 0}
],
'data_source': 'AggChildHealthMonthlyDataSource',
'format': 'percent',
'header': 'Children from 0 - 5 years with severe stunting (height-for-age)',
'reverseColors': True,
'slug': 'stunting_severe'
}
)
def test_stunting_moderate(self):
self.assertDictEqual(
self.get_data()['config']['sections'][0]['rows_config'][10],
{
'average': {
'html': 25.0,
'sort_key': 25.0
},
'data': [
{'html': 'Children from 0 - 5 years with moderate stunting (height-for-age)'},
{'html': 33.333333333333336, 'sort_key': 33.333333333333336},
{'html': 25.0, 'sort_key': 25.0},
{'html': 0}
],
'data_source': 'AggChildHealthMonthlyDataSource',
'format': 'percent',
'header': 'Children from 0 - 5 years with moderate stunting (height-for-age)',
'reverseColors': True,
'slug': 'stunting_moderate'
}
)
def test_stunting_normal(self):
self.assertDictEqual(
self.get_data()['config']['sections'][0]['rows_config'][11],
{
'average': {
'html': 40.625,
'sort_key': 40.625
},
'data': [
{'html': 'Children from 0 - 5 years with normal height-for-age'},
{'html': 33.333333333333336, 'sort_key': 33.333333333333336},
{'html': 40.625, 'sort_key': 40.625},
{'html': 0}
],
'data_source': 'AggChildHealthMonthlyDataSource',
'format': 'percent',
'header': 'Children from 0 - 5 years with normal height-for-age',
'slug': 'stunting_normal'
}
)
def test_low_birth_weight(self):
self.assertDictEqual(
self.get_data()['config']['sections'][0]['rows_config'][12],
{
'average': {
'html': 33.333333333333336,
'sort_key': 33.333333333333336
},
'data': [
{'html': 'Percent of children born in month with low birth weight'},
{'html': 0.0, 'sort_key': 0.0},
{'html': 33.333333333333336, 'sort_key': 33.333333333333336},
{'html': 0}
],
'data_source': 'AggChildHealthMonthlyDataSource',
'format': 'percent',
'header': 'Percent of children born in month with low birth weight',
'slug': 'low_birth_weight',
'reverseColors': True,
}
)
def test_rest_of_data(self):
data = self.get_data()
del(data['config']['sections'][0]['rows_config'])
self.assertDictEqual(
data,
{
'config': {
'category': 'maternal_and_child_nutrition',
'sections': [
{
'months': ['Apr 2017', 'May 2017', 'Jun 2017'],
'order': 1,
'section_title': 'Nutrition Status of Children',
'slug': 'nutrition_status_of_children'
}
],
'title': 'Maternal and Child Nutrition'
}
}
)
class TestFactSheetReportInterventions(TestCase):
    """Snapshot tests of the fact-sheet report for the 'interventions' category.

    Each test compares one row of the generated report config against a
    hard-coded expected dict built from the June 2017 test dataset.
    """

    def get_data(self):
        """Build the 'interventions' fact-sheet for Jun 2017 (vs May/Apr) at national level."""
        config = {
            'aggregation_level': 1,
            'month': datetime(2017, 6, 1).date(),
            'previous_month': datetime(2017, 5, 1).date(),
            'two_before': datetime(2017, 4, 1).date(),
            'category': 'interventions',
            'domain': 'icds-cas',
            'sql_location': None
        }
        loc_level = get_location_level(config.get('aggregation_level'))
        return FactSheetsReport(config=config, loc_level=loc_level).get_data()

    def test_section_amount(self):
        self.assertEqual(len(self.get_data()['config']['sections']), 3)

    def test_nutrition_status_of_children_amount_of_config_rows(self):
        self.assertEqual(len(self.get_data()['config']['sections'][0]['rows_config']), 1)

    def test_nutrition_status_of_children(self):
        # Section 0 has a single row: complete immunization by age 1.
        self.assertDictEqual(
            self.get_data()['config']['sections'][0],
            {
                'months': ['Apr 2017', 'May 2017', 'Jun 2017'],
                'order': 1,
                'rows_config': [
                    {
                        'average': {
                            'html': 10.79258010118044,
                            'sort_key': 10.79258010118044
                        },
                        'data': [
                            {'html': 'Children 1 year+ who have recieved complete immunization'
                                     ' required by age 1.'},
                            {'html': 10.526315789473685, 'sort_key': 10.526315789473685},
                            {'html': 10.79258010118044, 'sort_key': 10.79258010118044},
                            {'html': 0}
                        ],
                        'data_source': 'AggChildHealthMonthlyDataSource',
                        'format': 'percent',
                        'header': 'Children 1 year+ who have recieved complete immunization required by age 1.',
                        'slug': 'fully_immunized'
                    }
                ],
                'section_title': 'Nutrition Status of Children',
                'slug': 'nutrition_status_of_children'
            }
        )

    def test_nutrition_status_of_pregnant_women_amount_of_config_rows(self):
        self.assertEqual(len(self.get_data()['config']['sections'][1]['rows_config']), 6)

    def test_severe_anemic(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][1]['rows_config'][0],
            {
                'average': {
                    'html': 22.580645161290324,
                    'sort_key': 22.580645161290324
                },
                'data': [
                    {'html': 'Pregnant women who are anemic'},
                    {'html': 16.346153846153847, 'sort_key': 16.346153846153847},
                    {'html': 22.580645161290324, 'sort_key': 22.580645161290324},
                    {'html': 0}
                ],
                'data_source': 'AggCCSRecordMonthlyDataSource',
                'format': 'percent',
                'header': 'Pregnant women who are anemic',
                'reverseColors': True,
                'slug': 'severe_anemic'
            }
        )

    def test_tetanus_complete(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][1]['rows_config'][1],
            {
                'average': {
                    'html': 0.0,
                    'sort_key': 0.0
                },
                'data': [
                    {'html': 'Pregnant women with tetanus completed'},
                    {'html': 0.0, 'sort_key': 0.0},
                    {'html': 0.0, 'sort_key': 0.0},
                    {'html': 0}
                ],
                'data_source': 'AggCCSRecordMonthlyDataSource',
                'format': 'percent',
                'header': 'Pregnant women with tetanus completed',
                'slug': 'tetanus_complete'
            }
        )

    def test_anc_1(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][1]['rows_config'][2],
            {
                'average': {
                    'html': 0.0,
                    'sort_key': 0.0
                },
                'data': [
                    {'html': 'Pregnant women who had at least 1 ANC visit by delivery'},
                    {'html': 0.0, 'sort_key': 0.0},
                    {'html': 0.0, 'sort_key': 0.0},
                    {'html': 0}
                ],
                'data_source': 'AggCCSRecordMonthlyDataSource',
                'format': 'percent',
                'header': 'Pregnant women who had at least 1 ANC visit by delivery',
                'slug': 'anc_1'
            }
        )

    def test_anc_2(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][1]['rows_config'][3],
            {
                'average': {
                    'html': 0.0,
                    'sort_key': 0.0
                },
                'data': [
                    {'html': 'Pregnant women who had at least 2 ANC visits by delivery'},
                    {'html': 0.0, 'sort_key': 0.0},
                    {'html': 0.0, 'sort_key': 0.0},
                    {'html': 0}
                ],
                'data_source': 'AggCCSRecordMonthlyDataSource',
                'format': 'percent',
                'header': 'Pregnant women who had at least 2 ANC visits by delivery',
                'slug': 'anc_2'
            }
        )

    def test_anc_3(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][1]['rows_config'][4],
            {
                'average': {
                    'html': 0.0,
                    'sort_key': 0.0
                },
                'data': [
                    {'html': 'Pregnant women who had at least 3 ANC visits by delivery'},
                    {'html': 0.0, 'sort_key': 0.0},
                    {'html': 0.0, 'sort_key': 0.0},
                    {'html': 0}
                ],
                'data_source': 'AggCCSRecordMonthlyDataSource',
                'format': 'percent',
                'header': 'Pregnant women who had at least 3 ANC visits by delivery',
                'slug': 'anc_3'
            }
        )

    def test_anc_4(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][1]['rows_config'][5],
            {
                'average': {
                    'html': 0.0,
                    'sort_key': 0.0
                },
                'data': [
                    {'html': 'Pregnant women who had at least 4 ANC visits by delivery'},
                    {'html': 0.0, 'sort_key': 0.0},
                    {'html': 0.0, 'sort_key': 0.0},
                    {'html': 0}
                ],
                'data_source': 'AggCCSRecordMonthlyDataSource',
                'format': 'percent',
                'header': 'Pregnant women who had at least 4 ANC visits by delivery',
                'slug': 'anc_4'
            }
        )

    def test_awc_infrastructure_amount_of_config_rows(self):
        self.assertEqual(len(self.get_data()['config']['sections'][2]['rows_config']), 3)

    def test_medicine_kits(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][2]['rows_config'][0],
            {
                'average': {
                    'html': 66.66666666666667,
                    'sort_key': 66.66666666666667
                },
                'data': [
                    {'html': 'AWCs reported medicine kit'},
                    {'html': 78.57142857142857, 'sort_key': 78.57142857142857},
                    {'html': 66.66666666666667, 'sort_key': 66.66666666666667},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'format': 'percent',
                'header': 'AWCs reported medicine kit',
                'slug': 'medicine_kits'
            }
        )

    def test_baby_weighing_scale(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][2]['rows_config'][1],
            {
                'average': {
                    'html': 80.0,
                    'sort_key': 80.0
                },
                'data': [
                    {'html': 'AWCs reported weighing scale for infants'},
                    {'html': 71.42857142857143, 'sort_key': 71.42857142857143},
                    {'html': 80.0, 'sort_key': 80.0},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'format': 'percent',
                'header': 'AWCs reported weighing scale for infants',
                'slug': 'baby_weighing_scale'
            }
        )

    def test_adult_weighing_scale(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][2]['rows_config'][2],
            {
                'average': {
                    'html': 30.0,
                    'sort_key': 30.0
                },
                'data': [
                    {'html': 'AWCs reported weighing scale for mother and child'},
                    {'html': 21.428571428571427, 'sort_key': 21.428571428571427},
                    {'html': 30.0, 'sort_key': 30.0},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'format': 'percent',
                'header': 'AWCs reported weighing scale for mother and child',
                'slug': 'adult_weighing_scale'
            }
        )

    def test_rest_of_data(self):
        # Drop the per-row configs and check the remaining report scaffolding.
        data = self.get_data()
        del (data['config']['sections'][0]['rows_config'])
        del (data['config']['sections'][1]['rows_config'])
        del (data['config']['sections'][2]['rows_config'])
        self.assertDictEqual(
            data,
            {
                'config': {
                    'category': 'interventions',
                    'sections': [
                        {
                            'months': ['Apr 2017', 'May 2017', 'Jun 2017'],
                            'order': 1,
                            'section_title': 'Nutrition Status of Children',
                            'slug': 'nutrition_status_of_children'
                        },
                        {
                            'months': ['Apr 2017', 'May 2017', 'Jun 2017'],
                            'order': 3,
                            'section_title': 'Nutrition Status of Pregnant Women',
                            'slug': 'nutrition_status_of_pregnant_women'},
                        {
                            'months': ['Apr 2017', 'May 2017', 'Jun 2017'],
                            'order': 5,
                            'section_title': 'AWC Infrastructure',
                            'slug': 'awc_infrastructure'
                        }
                    ],
                    'title': 'Interventions'
                }
            }
        )
class TestFactSheetReportBehaviorChange(TestCase):
    """Snapshot tests of the fact-sheet report for the 'behavior_change' category.

    Each test compares one row of the generated report config against a
    hard-coded expected dict built from the June 2017 test dataset.
    """

    def get_data(self):
        """Build the 'behavior_change' fact-sheet for Jun 2017 (vs May/Apr) at national level."""
        config = {
            'aggregation_level': 1,
            'month': datetime(2017, 6, 1).date(),
            'previous_month': datetime(2017, 5, 1).date(),
            'two_before': datetime(2017, 4, 1).date(),
            'category': 'behavior_change',
            'domain': 'icds-cas',
            'sql_location': None
        }
        loc_level = get_location_level(config.get('aggregation_level'))
        return FactSheetsReport(config=config, loc_level=loc_level).get_data()

    def test_section_amount(self):
        self.assertEqual(len(self.get_data()['config']['sections']), 2)

    def test_child_feeding_indicators_amount_of_config_rows(self):
        self.assertEqual(len(self.get_data()['config']['sections'][0]['rows_config']), 7)

    def test_breastfed_at_birth(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][0],
            {
                'average': {
                    'html': 40.0,
                    'sort_key': 40.0
                },
                'data': [
                    {'html': 'Percentage of children who were put to the breast within one hour of birth.'},
                    {'html': 33.333333333333336, 'sort_key': 33.333333333333336},
                    {'html': 40.0, 'sort_key': 40.0},
                    {'html': 0}
                ],
                'data_source': 'AggChildHealthMonthlyDataSource',
                'format': 'percent',
                'header': 'Percentage of children who were put to the breast within one hour of birth.',
                'slug': 'breastfed_at_birth'
            }
        )

    def test_exclusively_breastfed(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][1],
            {
                'average': {
                    'html': 56.0,
                    'sort_key': 56.0
                },
                'data': [
                    {'html': 'Infants 0-6 months of age who are fed exclusively with breast milk.'},
                    {'html': 22.413793103448278, 'sort_key': 22.413793103448278},
                    {'html': 56.0, 'sort_key': 56.0},
                    {'html': 0}
                ],
                'data_source': 'AggChildHealthMonthlyDataSource',
                'format': 'percent',
                'header': 'Infants 0-6 months of age who are fed exclusively with breast milk.',
                'slug': 'exclusively_breastfed'
            }
        )

    def test_cf_initiation(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][2],
            {
                'average': {
                    'html': 85.0,
                    'sort_key': 85.0
                },
                'data': [
                    {'html': 'Children between 6 - 8 months given timely '
                             'introduction to solid, semi-solid or soft food.'},
                    {'html': 34.375, 'sort_key': 34.375},
                    {'html': 85.0, 'sort_key': 85.0},
                    {'html': 0}
                ],
                'data_source': 'AggChildHealthMonthlyDataSource',
                'format': 'percent',
                'header': 'Children between 6 - 8 months given timely introduction to solid, '
                          'semi-solid or soft food.',
                'slug': 'cf_initiation'
            }
        )

    def test_complementary_feeding(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][3],
            {
                'average': {
                    'html': 72.5609756097561,
                    'sort_key': 72.5609756097561
                },
                'data': [
                    {'html': 'Children from 6 - 24 months complementary feeding'},
                    {'html': 31.288343558282207, 'sort_key': 31.288343558282207},
                    {'html': 72.5609756097561, 'sort_key': 72.5609756097561},
                    {'html': 0}
                ],
                'data_source': 'AggChildHealthMonthlyDataSource',
                'format': 'percent',
                'header': 'Children from 6 - 24 months complementary feeding',
                'slug': 'complementary_feeding'
            }
        )

    def test_diet_diversity(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][4],
            {
                'average': {
                    'html': 57.926829268292686,
                    'sort_key': 57.926829268292686
                },
                'data': [
                    {'html': 'Children from 6 - 24 months consuming at least 4 food groups'},
                    {'html': 25.153374233128833, 'sort_key': 25.153374233128833},
                    {'html': 57.926829268292686, 'sort_key': 57.926829268292686},
                    {'html': 0}
                ],
                'data_source': 'AggChildHealthMonthlyDataSource',
                'format': 'percent',
                'header': 'Children from 6 - 24 months consuming at least 4 food groups',
                'slug': 'diet_diversity'
            }
        )

    def test_diet_quantity(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][5],
            {
                'average': {
                    'html': 47.5609756097561,
                    'sort_key': 47.5609756097561
                },
                'data': [
                    {'html': 'Children from 6 - 24 months consuming adequate food'},
                    {'html': 24.539877300613497, 'sort_key': 24.539877300613497},
                    {'html': 47.5609756097561, 'sort_key': 47.5609756097561},
                    {'html': 0}
                ],
                'data_source': 'AggChildHealthMonthlyDataSource',
                'format': 'percent',
                'header': 'Children from 6 - 24 months consuming adequate food',
                'slug': 'diet_quantity'
            }
        )

    def test_handwashing(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][6],
            {
                'average': {
                    'html': 68.29268292682927,
                    'sort_key': 68.29268292682927
                },
                'data': [
                    {'html': 'Children from 6 - 24 months whose mothers handwash before feeding'},
                    {'html': 26.993865030674847, 'sort_key': 26.993865030674847},
                    {'html': 68.29268292682927, 'sort_key': 68.29268292682927},
                    {'html': 0}
                ],
                'data_source': 'AggChildHealthMonthlyDataSource',
                'format': 'percent',
                'header': 'Children from 6 - 24 months whose mothers handwash before feeding',
                'slug': 'handwashing'
            }
        )

    def test_nutrition_status_of_pregnant_women_amount_of_config_rows(self):
        self.assertEqual(len(self.get_data()['config']['sections'][1]['rows_config']), 3)

    def test_resting(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][1]['rows_config'][0],
            {
                'average': {
                    'html': 89.6774193548387,
                    'sort_key': 89.6774193548387
                },
                'data': [
                    {'html': 'Women resting during pregnancy'},
                    {'html': 53.84615384615385, 'sort_key': 53.84615384615385},
                    {'html': 89.6774193548387, 'sort_key': 89.6774193548387},
                    {'html': 0}
                ],
                'data_source': 'AggCCSRecordMonthlyDataSource',
                'format': 'percent',
                'header': 'Women resting during pregnancy',
                'slug': 'resting'
            }
        )

    def test_extra_meal(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][1]['rows_config'][1],
            {
                'average': {
                    'html': 89.6774193548387,
                    'sort_key': 89.6774193548387
                },
                'data': [
                    {'html': 'Women eating an extra meal during pregnancy'},
                    {'html': 53.84615384615385, 'sort_key': 53.84615384615385},
                    {'html': 89.6774193548387, 'sort_key': 89.6774193548387},
                    {'html': 0}
                ],
                'data_source': 'AggCCSRecordMonthlyDataSource',
                'format': 'percent',
                'header': 'Women eating an extra meal during pregnancy',
                'slug': 'extra_meal'
            }
        )

    def test_trimester(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][1]['rows_config'][2],
            {
                'average': {
                    'html': 72.15189873417721,
                    'sort_key': 72.15189873417721
                },
                'data': [
                    {'html': 'Pregnant women in 3rd trimester counselled on immediate and '
                             'exclusive breastfeeding during home visit'},
                    {'html': 39.62264150943396, 'sort_key': 39.62264150943396},
                    {'html': 72.15189873417721, 'sort_key': 72.15189873417721},
                    {'html': 0}
                ],
                'data_source': 'AggCCSRecordMonthlyDataSource',
                'format': 'percent',
                'header': 'Pregnant women in 3rd trimester counselled on immediate and '
                          'exclusive breastfeeding during home visit',
                'slug': 'trimester'
            }
        )

    def test_rest_of_data(self):
        # Drop the per-row configs and check the remaining report scaffolding.
        data = self.get_data()
        del (data['config']['sections'][0]['rows_config'])
        del (data['config']['sections'][1]['rows_config'])
        self.assertDictEqual(
            data,
            {
                'config': {
                    'category': 'behavior_change',
                    'sections': [
                        {
                            'months': ['Apr 2017', 'May 2017', 'Jun 2017'],
                            'order': 2,
                            'section_title': 'Child Feeding Indicators',
                            'slug': 'child_feeding_indicators'
                        },
                        {
                            'months': ['Apr 2017', 'May 2017', 'Jun 2017'],
                            'order': 3,
                            'section_title': 'Nutrition Status of Pregnant Women',
                            'slug': 'nutrition_status_of_pregnant_women'
                        }
                    ],
                    'title': 'Behavior Change'
                }
            }
        )
class TestFactSheetReportWaterSanitationAndHygiene(TestCase):
    """Snapshot tests of the fact-sheet report for 'water_sanitation_and_hygiene'."""

    def get_data(self):
        """Build the fact-sheet for Jun 2017 (vs May/Apr) at national level."""
        config = {
            'aggregation_level': 1,
            'month': datetime(2017, 6, 1).date(),
            'previous_month': datetime(2017, 5, 1).date(),
            'two_before': datetime(2017, 4, 1).date(),
            'category': 'water_sanitation_and_hygiene',
            'domain': 'icds-cas',
            'sql_location': None,
        }
        loc_level = get_location_level(config.get('aggregation_level'))
        report = FactSheetsReport(config=config, loc_level=loc_level)
        return report.get_data()

    def test_section_amount(self):
        sections = self.get_data()['config']['sections']
        self.assertEqual(len(sections), 1)

    def test_awc_infrastructure_amount_of_config_rows(self):
        rows = self.get_data()['config']['sections'][0]['rows_config']
        self.assertEqual(len(rows), 2)

    def test_clean_water(self):
        """Row 0 reports percentages of AWCs with clean drinking water."""
        row = self.get_data()['config']['sections'][0]['rows_config'][0]
        expected = {
            'average': {
                'html': 96.66666666666667,
                'sort_key': 96.66666666666667,
            },
            'data': [
                {'html': 'AWCs reported clean drinking water'},
                {'html': 100.0, 'sort_key': 100.0},
                {'html': 96.66666666666667, 'sort_key': 96.66666666666667},
                {'html': 0},
            ],
            'data_source': 'AggAWCMonthlyDataSource',
            'format': 'percent',
            'header': 'AWCs reported clean drinking water',
            'slug': 'clean_water',
        }
        self.assertDictEqual(row, expected)

    def test_functional_toilet(self):
        """Row 1 reports percentages of AWCs with a functional toilet."""
        row = self.get_data()['config']['sections'][0]['rows_config'][1]
        expected = {
            'average': {
                'html': 50.0,
                'sort_key': 50.0,
            },
            'data': [
                {'html': 'AWCs reported functional toilet'},
                {'html': 57.142857142857146, 'sort_key': 57.142857142857146},
                {'html': 50.0, 'sort_key': 50.0},
                {'html': 0},
            ],
            'data_source': 'AggAWCMonthlyDataSource',
            'format': 'percent',
            'header': 'AWCs reported functional toilet',
            'slug': 'functional_toilet',
        }
        self.assertDictEqual(row, expected)

    def test_rest_of_data(self):
        """With rows_config removed, the remaining scaffolding matches."""
        data = self.get_data()
        data['config']['sections'][0].pop('rows_config')
        expected = {
            'config': {
                'category': 'water_sanitation_and_hygiene',
                'sections': [
                    {
                        'months': ['Apr 2017', 'May 2017', 'Jun 2017'],
                        'order': 5,
                        'section_title': 'AWC Infrastructure',
                        'slug': 'awc_infrastructure',
                    },
                ],
                'title': 'Water Sanitation And Hygiene',
            }
        }
        self.assertDictEqual(data, expected)
class TestFactSheetReportDemographics(TestCase):
    """Snapshot tests of the fact-sheet report for the 'demographics' category.

    Each test compares one row of the generated report config against a
    hard-coded expected dict built from the June 2017 test dataset.
    """

    # Show full diffs on assertion failure; the expected dicts are large.
    maxDiff = None

    def get_data(self):
        """Build the 'demographics' fact-sheet for Jun 2017 (vs May/Apr) at national level."""
        config = {
            'aggregation_level': 1,
            'month': datetime(2017, 6, 1).date(),
            'previous_month': datetime(2017, 5, 1).date(),
            'two_before': datetime(2017, 4, 1).date(),
            'category': 'demographics',
            'domain': 'icds-cas',
            'sql_location': None
        }
        loc_level = get_location_level(config.get('aggregation_level'))
        return FactSheetsReport(config=config, loc_level=loc_level).get_data()

    def test_section_amount(self):
        self.assertEqual(len(self.get_data()['config']['sections']), 1)

    def test_demographics_amount_of_config_rows(self):
        self.assertEqual(len(self.get_data()['config']['sections'][0]['rows_config']), 18)

    def test_cases_household(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][0],
            {
                'average': {
                    'html': 2799,
                    'sort_key': 2799
                },
                'data': [
                    {'html': 'Number of Households'},
                    {'html': 2792, 'sort_key': 2792},
                    {'html': 2799, 'sort_key': 2799},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'header': 'Number of Households',
                'slug': 'cases_household',
            }
        )

    def test_cases_person_all(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][1],
            {
                'average': {
                    'html': 966,
                    'sort_key': 966
                },
                'data': [
                    {'html': 'Total Number of Household Members'},
                    {'html': 958, 'sort_key': 958},
                    {'html': 966, 'sort_key': 966},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'header': 'Total Number of Household Members',
                'slug': 'cases_person_all',
            }
        )

    def test_cases_person_beneficiary(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][2],
            {
                'average': {
                    'html': 1609,
                    'sort_key': 1609
                },
                'data': [
                    {'html': 'Total number of members enrolled at AWC'},
                    {'html': 1525, 'sort_key': 1525},
                    {'html': 1609, 'sort_key': 1609},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'header': 'Total number of members enrolled at AWC',
                'slug': 'cases_person_beneficiary_v2',
            }
        )

    def test_aadhar(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][3],
            {
                'average': {
                    'html': 21.504039776258544,
                    'sort_key': 21.504039776258544
                },
                'data': [
                    {'html': AADHAR_SEEDED_BENEFICIARIES},
                    {'html': 19.540983606557376, 'sort_key': 19.540983606557376},
                    {'html': 21.504039776258544, 'sort_key': 21.504039776258544},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'format': 'percent',
                'header': AADHAR_SEEDED_BENEFICIARIES,
                'slug': 'aadhar',
            }
        )

    def test_cases_ccs_pregnant_all(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][4],
            {
                'average': {
                    'html': 155,
                    'sort_key': 155
                },
                'data': [
                    {'html': 'Total pregnant women '},
                    {'html': 104, 'sort_key': 104},
                    {'html': 155, 'sort_key': 155},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'header': 'Total pregnant women ',
                'slug': 'cases_ccs_pregnant_all',
            }
        )

    def test_cases_ccs_pregnant(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][5],
            {
                'average': {
                    'html': 155,
                    'sort_key': 155
                },
                'data': [
                    {'html': 'Total pregnant women enrolled for services at AWC'},
                    {'html': 104, 'sort_key': 104},
                    {'html': 155, 'sort_key': 155},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'header': 'Total pregnant women enrolled for services at AWC',
                'slug': 'cases_ccs_pregnant',
            }
        )

    def test_cases_ccs_lactating_all(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][6],
            {
                'average': {
                    'html': 167,
                    'sort_key': 167
                },
                'data': [
                    {'html': 'Total lactating women'},
                    {'html': 160, 'sort_key': 160},
                    {'html': 167, 'sort_key': 167},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'header': 'Total lactating women',
                'slug': 'cases_ccs_lactating_all',
            }
        )

    def test_cases_ccs_lactating(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][7],
            {
                'average': {
                    'html': 167,
                    'sort_key': 167
                },
                'data': [
                    {'html': 'Total lactating women registered for services at AWC'},
                    {'html': 160, 'sort_key': 160},
                    {'html': 167, 'sort_key': 167},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'header': 'Total lactating women registered for services at AWC',
                'slug': 'cases_ccs_lactating',
            }
        )

    def test_cases_child_health_all(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][8],
            {
                'average': {
                    'html': 1287,
                    'sort_key': 1287
                },
                'data': [
                    {'html': 'Total children (0-6 years)'},
                    {'html': 1261, 'sort_key': 1261},
                    {'html': 1287, 'sort_key': 1287},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'header': 'Total children (0-6 years)',
                'slug': 'cases_child_health_all',
            }
        )

    def test_cases_child_health(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][9],
            {
                'average': {
                    'html': 1287,
                    'sort_key': 1287
                },
                'data': [
                    {'html': 'Total chldren (0-6 years) enrolled for Anganwadi Services'},
                    {'html': 1261, 'sort_key': 1261},
                    {'html': 1287, 'sort_key': 1287},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'header': 'Total chldren (0-6 years) enrolled for Anganwadi Services',
                'slug': 'cases_child_health',
            }
        )

    def test_zero(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][10],
            {
                'average': {
                    'html': 5,
                    'sort_key': 5
                },
                'data': [
                    {'html': 'Children (0-28 days) enrolled for Anganwadi Services'},
                    {'html': 5, 'sort_key': 5},
                    {'html': 5, 'sort_key': 5},
                    {'html': 0}
                ],
                'data_source': 'AggChildHealthMonthlyDataSource',
                'header': 'Children (0-28 days) enrolled for Anganwadi Services',
                'slug': 'zero',
            }
        )

    def test_one(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][11],
            {
                'average': {
                    'html': 45,
                    'sort_key': 45
                },
                'data': [
                    {'html': 'Children (28 days - 6 months) enrolled for Anganwadi Services'},
                    {'html': 53, 'sort_key': 53},
                    {'html': 45, 'sort_key': 45},
                    {'html': 0}
                ],
                'data_source': 'AggChildHealthMonthlyDataSource',
                'header': 'Children (28 days - 6 months) enrolled for Anganwadi Services',
                'slug': 'one',
            }
        )

    def test_two(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][12],
            {
                'average': {
                    'html': 51,
                    'sort_key': 51
                },
                'data': [
                    {'html': 'Children (6 months - 1 year) enrolled for Anganwadi Services'},
                    {'html': 44, 'sort_key': 44},
                    {'html': 51, 'sort_key': 51},
                    {'html': 0}
                ],
                'data_source': 'AggChildHealthMonthlyDataSource',
                'header': 'Children (6 months - 1 year) enrolled for Anganwadi Services',
                'slug': 'two',
            }
        )

    def test_three(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][13],
            {
                'average': {
                    'html': 213,
                    'sort_key': 213
                },
                'data': [
                    {'html': 'Children (1 year - 3 years) enrolled for Anganwadi Services'},
                    {'html': 237, 'sort_key': 237},
                    {'html': 213, 'sort_key': 213},
                    {'html': 0}
                ],
                'data_source': 'AggChildHealthMonthlyDataSource',
                'header': 'Children (1 year - 3 years) enrolled for Anganwadi Services',
                'slug': 'three',
            }
        )

    def test_four(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][14],
            {
                'average': {
                    'html': 973,
                    'sort_key': 973
                },
                'data': [
                    {'html': 'Children (3 years - 6 years) enrolled for Anganwadi Services'},
                    {'html': 922, 'sort_key': 922},
                    {'html': 973, 'sort_key': 973},
                    {'html': 0}
                ],
                'data_source': 'AggChildHealthMonthlyDataSource',
                'header': 'Children (3 years - 6 years) enrolled for Anganwadi Services',
                'slug': 'four',
            }
        )

    def test_cases_person_adolescent_girls_11_14_all(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][15],
            {
                'average': {
                    'html': 24,
                    'sort_key': 24
                },
                'data': [
                    {'html': 'Number of adolescent girls (11-14 years)'},
                    {'html': 33, 'sort_key': 33},
                    {'html': 24, 'sort_key': 24},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'header': 'Number of adolescent girls (11-14 years)',
                'slug': 'cases_person_adolescent_girls_11_14_all_v2',
            }
        )

    def test_cases_person_adolescent_girls_15_18_all(self):
        # NOTE: this row sits at index 17, after the out-of-school row at 16.
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][17],
            {
                'average': {
                    'html': 12,
                    'sort_key': 12
                },
                'data': [
                    {'html': 'Adolescent girls (15-18 years)'},
                    {'html': 18, 'sort_key': 18},
                    {'html': 12, 'sort_key': 12},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'header': 'Adolescent girls (15-18 years)',
                'slug': 'cases_person_adolescent_girls_15_18_all',
            }
        )

    def test_cases_person_adolescent_girls_11_14(self):
        self.assertDictEqual(
            self.get_data()['config']['sections'][0]['rows_config'][16],
            {
                'average': {
                    'html': 2,
                    'sort_key': 2
                },
                'data': [
                    {'html': 'Number of out of school adolescent girls (11-14 years)'},
                    {'html': 2, 'sort_key': 2},
                    {'html': 2, 'sort_key': 2},
                    {'html': 0}
                ],
                'data_source': 'AggAWCMonthlyDataSource',
                'header': 'Number of out of school adolescent girls (11-14 years)',
                'reverseColors': True,
                'slug': 'cases_person_adolescent_girls_11_14_out_of_school',
            }
        )

    def test_rest_of_data(self):
        # Drop the per-row configs and check the remaining report scaffolding.
        data = self.get_data()
        del (data['config']['sections'][0]['rows_config'])
        self.assertDictEqual(
            data,
            {
                'config': {
                    'category': 'demographics',
                    'sections': [
                        {
                            'months': ['Apr 2017', 'May 2017', 'Jun 2017'],
                            'order': 4,
                            'section_title': 'Demographics',
                            'slug': 'demographics'
                        }
                    ],
                    'title': 'Demographics'
                }
            }
        )
| 39.01534 | 112 | 0.444432 |
fb7d6d23c5f0e2f0d9d55dbef7906f2e368fa462 | 646 | py | Python | example/zygosity2-missing.py | argriffing/hmmus | c91696735eed420bbf13b3cb177d3a652efaff69 | [
"MIT"
] | 9 | 2015-02-05T15:58:29.000Z | 2017-11-18T09:25:34.000Z | example/zygosity2-missing.py | argriffing/hmmus | c91696735eed420bbf13b3cb177d3a652efaff69 | [
"MIT"
] | null | null | null | example/zygosity2-missing.py | argriffing/hmmus | c91696735eed420bbf13b3cb177d3a652efaff69 | [
"MIT"
] | null | null | null | """
Analyze a fasta file with missing data using two-state HMM.
"""
import numpy as np
from hmmus import hmm
from hmmus import zygohelper
# state 0: homozygous and missing
# state 1: heterozygous
# emission 0: ACGT
# emission 1: MRWSYK
# missing data: N
# Map each FASTA letter to its emission class; N is treated as missing data.
g_letter_to_emission = {
    'A':0, 'C':0, 'G':0, 'T':0,
    'M':1, 'R':1, 'W':1, 'S':1, 'Y':1, 'K':1,
    'N':hmm.MISSING}
# Initial 2x2 state-transition matrix (sticky chain: 0.9 self-transition).
g_default_trans = np.array([
    [0.9, 0.1],
    [0.1, 0.9]])
# Initial per-state emission probabilities over the two emission classes.
g_default_emiss = np.array([
    [0.9, 0.1],
    [0.5, 0.5]])
if __name__ == '__main__':
    zygohelper.run(
        g_letter_to_emission, g_default_trans, g_default_emiss, __doc__)
| 19.575758 | 76 | 0.602167 |
40abf7b9e3d615115f4cb7c3065305f48dd4fabb | 7,034 | py | Python | speedy/test_recall.py | microsoft/SpeedyRec | 1186120f8c5ee8c904676bb2f19892d064c984e6 | [
"MIT"
] | 23 | 2021-03-29T03:08:27.000Z | 2022-01-19T06:41:19.000Z | speedy/test_recall.py | microsoft/SpeedyRec | 1186120f8c5ee8c904676bb2f19892d064c984e6 | [
"MIT"
] | 3 | 2021-11-29T04:03:38.000Z | 2022-01-19T08:57:54.000Z | speedy/test_recall.py | microsoft/SpeedyRec | 1186120f8c5ee8c904676bb2f19892d064c984e6 | [
"MIT"
] | 2 | 2021-04-13T07:26:32.000Z | 2021-09-13T12:16:50.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import math
import numpy as np
import hnswlib
import random
import os
import torch
import logging
from tqdm import tqdm
from .test_auc import load_model
from .infer_news_vecs import pad_to_fix_len
def get_day_item(data_dir, date, news_index, pad_news_index):
    """Load one day's candidate news ids.

    Reads ``daily_news_{date}.tsv`` (a single tab-separated line of news
    names) and maps each name to its row index via ``news_index``; names
    absent from the index fall back to ``pad_news_index``.
    """
    path = os.path.join(data_dir, "daily_news_{}.tsv".format(date))
    with open(path, 'r', encoding='utf-8') as fin:
        names = fin.read().strip().split('\t')
    return [news_index.get(name, pad_news_index) for name in names]
def CreatIndex(batchsize, all_item_vec, itemid, mode):
    """Build an HNSW approximate-nearest-neighbour index over item vectors.

    Args:
        batchsize: number of vectors inserted per ``add_items`` call.
        all_item_vec: array-like of shape (num_items, dim).
        itemid: ids to associate with the vectors, aligned with rows.
        mode: hnswlib distance space, e.g. 'ip' or 'cosine'.

    Returns:
        A populated ``hnswlib.Index`` with ef=1500 for querying.
    """
    n_items = np.shape(all_item_vec)[0]
    index = hnswlib.Index(space=mode, dim=np.shape(all_item_vec)[-1])
    index.init_index(max_elements=n_items, ef_construction=200, M=100)
    index.set_ef(1500)
    # Insert in batches to bound peak memory of each add_items call.
    for start in range(0, n_items, batchsize):
        end = min(start + batchsize, n_items)
        index.add_items(all_item_vec[start:end], itemid[start:end])
    return index
def hist_pos(data_dir, date, news_index, user_log_length):
    """Load per-user click histories and positive items for one day.

    Each line of ``history_positive_{date}.tsv`` is
    ``uid<TAB>hist1;hist2;...<TAB>pos1;pos2;...``. Histories are mapped
    to indices and padded to ``user_log_length`` via ``pad_to_fix_len``.

    Returns:
        (history, mask, positems): padded history index lists, their
        attention masks, and per-user lists of positive item indices.
    """
    path = os.path.join(data_dir, "history_positive_{}.tsv".format(date))
    history, mask, positems = [], [], []
    with open(path, 'r', encoding='utf-8') as fin:
        for line in fin:
            _uid, hist_str, pos_str = line.strip().split('\t')
            clicked = [news_index[x] for x in hist_str.split(';')]
            padded, pad_mask = pad_to_fix_len(clicked, user_log_length)
            history.append(padded)
            mask.append(pad_mask)
            positems.append([news_index[x] for x in pos_str.split(';')])
    return history, mask, positems
def generate_user_data(history, mask, all_item_embedding, user_batch_size=512):
    """Yield batches of (history item embeddings, history mask) per user.

    Args:
        history: list of per-user histories, each a fixed-length list of
            row indices into ``all_item_embedding``.
        mask: list of per-user masks aligned with ``history``.
        all_item_embedding: np.ndarray of shape (num_items, dim).
        user_batch_size: number of users per yielded batch.

    Yields:
        ``(batch_embeddings, batch_mask)`` where ``batch_embeddings`` has
        shape (batch, history_len, dim) and ``batch_mask`` is the matching
        slice of ``mask``.
    """
    total = len(history)
    # Removed dead commented-out code and the redundant double assignment
    # of ``index``; batching logic is unchanged.
    for start in range(0, total, user_batch_size):
        end = min(start + user_batch_size, total)
        index = np.array(history[start:end])
        yield all_item_embedding[index], mask[start:end]
def CreatUserEmbed(history, mask, all_item_embedding, user_batch_size, model, device):
    """Encode every user's clicked-news history into a user vector.

    Batches users via ``generate_user_data``, runs the model's user
    encoder under ``torch.no_grad()``, and collects the vectors on CPU.

    Args:
        history: list of per-user padded history index lists.
        mask: list of per-user masks aligned with ``history``.
        all_item_embedding: np.ndarray of news vectors.
        user_batch_size: users per inference batch.
        model: model exposing ``user_encoder.infer_user_vec(vecs, mask)``.
        device: torch device to run inference on.

    Returns:
        np.ndarray of shape (num_users, user_dim).
    """
    user_embedding = []
    with torch.no_grad():
        progress = tqdm(enumerate(generate_user_data(history, mask, all_item_embedding, user_batch_size)),
                        dynamic_ncols=True,
                        total=(math.ceil(len(history) / user_batch_size)))
        for _step, (log_vecs, log_mask) in progress:
            # Bug fix: the original called ``.cuda(non_blocking=True)``
            # unconditionally, which crashes on CPU-only runs even though
            # ``device`` is a parameter. Move directly to ``device``.
            log_vecs = torch.from_numpy(log_vecs).to(device, non_blocking=True).float()
            log_mask = torch.Tensor(log_mask).to(device, non_blocking=True).float()
            user_vecs = model.user_encoder.infer_user_vec(log_vecs, log_mask)
            user_embedding.extend(user_vecs.detach().cpu().numpy())
    return np.array(user_embedding)
def get_result(user_embedding, positems, p, hnswlib_batch_size=5000):
    """Query the ANN index in batches and compute recall@{20,50,100,200}.

    Args:
        user_embedding: array-like of user vectors, one per user.
        positems: per-user lists of ground-truth positive item ids.
        p: hnswlib-style index supporting ``knn_query(vectors, k)``.
        hnswlib_batch_size: number of users per knn query.

    Returns:
        np.ndarray ``[recall@20, recall@50, recall@100, recall@200]``
        averaged over users.

    Note: the original also initialised an unused ``recall500``
    accumulator; that dead code is removed here.
    """
    user_num = len(user_embedding)
    # Retrieve the top-200 neighbours for every user, in batches.
    all_ans = []
    for start in range(0, user_num, hnswlib_batch_size):
        end = min(start + hnswlib_batch_size, user_num)
        ans, _dis = p.knn_query(user_embedding[start:end], k=200)
        all_ans.extend(ans)
    cutoffs = (20, 50, 100, 200)
    recalls = np.zeros(len(cutoffs))
    for ans, pos in zip(all_ans, positems):
        pos = set(pos)
        for j, k in enumerate(cutoffs):
            recalls[j] += len(pos & set(ans[:k])) / len(pos)
    return recalls / user_num
def consine_similarity(item_embedding):
    """Mean cosine similarity over all ordered pairs of distinct rows.

    A small epsilon (1e-6) guards against zero-norm rows; the diagonal
    (self-similarity) is excluded from the average.
    """
    num = item_embedding.shape[0]
    row_norms = np.linalg.norm(item_embedding, axis=1)
    norm_products = np.outer(row_norms, row_norms)
    simi = item_embedding @ item_embedding.T
    simi = simi / (norm_products + 1e-6)
    np.fill_diagonal(simi, 0.0)
    return simi.sum() / (num * num - num)
def get_similarity(item_embedding):
    """Monte-Carlo estimate of the average cosine similarity among items.

    Draws 10000 random groups of 10 item vectors and averages
    ``consine_similarity`` over the groups.

    NOTE(review): the candidate pool is ``range(1, len(item_embedding) - 1)``,
    which excludes the first and last rows — presumably padding entries;
    confirm this exclusion is intentional.
    """
    group_size = 10
    n_samples = 10000
    candidates = list(range(1, len(item_embedding) - 1))
    total = 0
    for _ in range(n_samples):
        chosen = random.sample(candidates, group_size)
        total += consine_similarity(item_embedding[chosen])
    return total / n_samples
def test_recall(args,news_index,news_embed):
    """Evaluate recall@K of the model over the daily recall test sets.

    For each test day: builds an HNSW index over that day's candidate
    items, encodes users from their click histories, computes
    recall@{20,50,100,200}, and logs per-day and averaged results.

    Args:
        args: runtime config; reads test_batch_size, enable_gpu,
            user_log_length and root_data_dir, and is passed to load_model.
        news_index: dict mapping news name -> row index in news_embed.
        news_embed: np.ndarray of precomputed news vectors.
    """
    logging.info('------start test recll------')
    user_batch_size = args.test_batch_size
    hnswlib_batch_size = 5000
    mode = 'ip' # 'cosine'
    device = torch.device("cuda") if args.enable_gpu else torch.device("cpu")
    model = load_model(args)
    model.to(device)
    # Accumulator for recall@{20,50,100,200} summed over test days.
    res = np.array([0.0] * 4)
    # Estimate average item-vector similarity before padding is appended.
    simi = get_similarity(news_embed)
    # Append a zero "padding" row so unknown news map to a neutral vector.
    pad_news_index = len(news_embed)
    pad_news = np.zeros((1,news_embed.shape[-1]))
    news_embed = np.concatenate([news_embed,pad_news],0)
    # Test days 1 and 2.
    date_recall = list(range(1,3))
    data_dir = os.path.join(args.root_data_dir,'testdata/daily_recall')
    for date in date_recall:
        day_item = get_day_item(data_dir,date,news_index,pad_news_index)
        item_num = len(day_item)
        item_embedding = news_embed[day_item]
        # Index only this day's candidate items.
        p = CreatIndex(hnswlib_batch_size, item_embedding, day_item, mode)
        p.set_ef(1500)
        history, mask, positems = hist_pos(data_dir,date,news_index,args.user_log_length)
        user_embedding = CreatUserEmbed(history, mask, news_embed, user_batch_size, model, device)
        user_num = len(positems)
        logging.info('recall_data: {} user_num:{}, item_num:{}'.format(date,user_num,item_num))
        day_res = get_result(user_embedding, positems, p, hnswlib_batch_size)
        res += day_res
        info = '{}-: recall20:{},recall50:{},recall100:{},recall200:{}'.format(date,
                                                                               day_res[0], day_res[1],
                                                                               day_res[2], day_res[3])
        logging.info(info)
    # Average over the test days and log the summary line.
    res = res / len(date_recall)
    info = 'Avg. simi:{} recall20:{},recall50:{},recall100:{},recall200:{}'.format(simi,
                                                                                   res[0], res[1],
                                                                                   res[2], res[3])
    logging.info(info)
07dcefeed9d77fa9495e8deb7ef69b2fc49c8532 | 251 | py | Python | my_blog_project/views.py | nitinkumar388/Django-Blog-Web-Project | 6c3d09b342645701063b1e66523e77f62bed9db3 | [
"MIT"
] | null | null | null | my_blog_project/views.py | nitinkumar388/Django-Blog-Web-Project | 6c3d09b342645701063b1e66523e77f62bed9db3 | [
"MIT"
] | null | null | null | my_blog_project/views.py | nitinkumar388/Django-Blog-Web-Project | 6c3d09b342645701063b1e66523e77f62bed9db3 | [
"MIT"
] | null | null | null |
from django.http.response import HttpResponse
from django.http import HttpResponse
from django.shortcuts import HttpResponseRedirect
from django.urls import reverse
def index(request):
    """Redirect the project root to App_Blog's blog list page."""
    blog_list_url = reverse('App_Blog:blog_list')
    return HttpResponseRedirect(blog_list_url)
85ebea1745148762b4efe2dd26f7aa7622043393 | 823 | py | Python | tests/test_partfile.py | jseppanen/disco | 23ef8badfc7c539672e8834875d9908974b646dc | [
"BSD-3-Clause"
] | 2 | 2016-05-09T17:03:08.000Z | 2016-07-19T11:27:54.000Z | tests/test_partfile.py | jseppanen/disco | 23ef8badfc7c539672e8834875d9908974b646dc | [
"BSD-3-Clause"
] | null | null | null | tests/test_partfile.py | jseppanen/disco | 23ef8badfc7c539672e8834875d9908974b646dc | [
"BSD-3-Clause"
] | null | null | null | from disco.test import DiscoJobTestFixture, DiscoTestCase
from collections import defaultdict
class PartitionFileTestCase(DiscoJobTestFixture, DiscoTestCase):
    """End-to-end check that a disco job sums values grouped by key.

    Inputs are 100 synthetic ``key:value`` strings: for each key x in
    0..9, the values x, x+1, ..., x+9. (Python 2 era code: uses xrange.)
    """
    # Each ``i`` is a (key, value) tuple formatted into a 'key:value' string.
    inputs = ['%s:%s' % i for x in xrange(10)
              for i in zip([x] * 10, range(x, x + 10))]

    def getdata(self, path):
        # Fixture hook: each pseudo-input's payload is its own path string.
        return '%s\n' % path

    @staticmethod
    def map(e, params):
        # Emit one (key, value) pair parsed from a 'key:value' record.
        return [e.split(':')]

    @property
    def answers(self):
        # Expected (key, sum-of-values) pairs; keys are strings.
        for x in xrange(10):
            yield '%s' % x, sum(xrange(x, x + 10))

    def runTest(self):
        # Aggregate the job's (key, value) results and compare to answers.
        results = defaultdict(int)
        for k, v in self.results:
            results[k] += int(v)
        self.assertEquals(results, dict(self.answers))
class MultiPartitionFileTestCase(PartitionFileTestCase):
    """Same job as PartitionFileTestCase but with one reduce per input."""

    @property
    def nr_reduces(self):
        # One reduce task per input line (100 here), exercising the
        # many-partition-file code path.
        return len(self.inputs)
| 27.433333 | 64 | 0.616039 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.